/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Reassociate/
fast-basictest.ll
    16: ; Both 'reassoc' and 'nsz' are required.
    19: ; CHECK-NEXT: [[ARG_NEG:%.*]] = fsub reassoc nsz float -0.000000e+00, [[ARG:%.*]]
    22: %t1 = fsub reassoc nsz float -1.200000e+01, %arg
    23: %t2 = fadd reassoc nsz float %t1, 1.200000e+01
    27: ; Verify the fold is not done with only 'reassoc' ('nsz' is required).
    30: ; CHECK-NEXT: [[T1:%.*]] = fsub reassoc float -1.200000e+01, [[ARG:%.*]]
    31: ; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[T1]], 1.200000e+01
    34: %t1 = fsub reassoc float -1.200000e+01, %arg
    35: %t2 = fadd reassoc float %t1, 1.200000e+01
    65: ; CHECK-NEXT: [[REG115:%.*]] = fadd reassoc float [[REG109:%.*]], -3.000000e+01
    [all …]
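Reassembled from the matches above into a standalone sketch (the function name and the opt invocation are illustrative, not taken from the file): with both 'reassoc' and 'nsz', the Reassociate pass can fold (-12 - %arg) + 12 down to a bare negation.

    ; run with: opt -reassociate -S < example.ll
    define float @fold_to_negation(float %arg) {
      ; (-12 - %arg) + 12 ==> fsub -0.0, %arg. 'nsz' is needed: for
      ; %arg == +0.0 the original expression yields +0.0, while the
      ; folded negation yields -0.0.
      %t1 = fsub reassoc nsz float -1.200000e+01, %arg
      %t2 = fadd reassoc nsz float %t1, 1.200000e+01
      ret float %t2
    }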
fast-SubReassociate.ll
    32: ; Both 'reassoc' and 'nsz' are required.
    35: ; CHECK-NEXT: [[Z:%.*]] = fsub reassoc nsz float %A, %B
    38: %W = fadd reassoc nsz float %B, 5.000000e+00
    39: %X = fadd reassoc nsz float %A, -7.000000e+00
    40: %Y = fsub reassoc nsz float %X, %W
    41: %Z = fadd reassoc nsz float %Y, 1.200000e+01
    45: ; Verify the fold is not done with only 'reassoc' ('nsz' is required).
    48: ; CHECK-NEXT: [[W:%.*]] = fadd reassoc float %B, 5.000000e+00
    49: ; CHECK-NEXT: [[X:%.*]] = fadd reassoc float %A, -7.000000e+00
    50: ; CHECK-NEXT: [[Y:%.*]] = fsub reassoc float [[X]], [[W]]
    [all …]
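The same matches stitched into one function (name illustrative): the constants cancel, since (A - 7) - (B + 5) + 12 == A - B, and per the CHECK at file line 35 the whole chain collapses to a single fsub when 'reassoc' and 'nsz' are both present.

    define float @constants_cancel(float %A, float %B) {
      ; expected with reassoc+nsz: %Z = fsub reassoc nsz float %A, %B
      %W = fadd reassoc nsz float %B, 5.000000e+00
      %X = fadd reassoc nsz float %A, -7.000000e+00
      %Y = fsub reassoc nsz float %X, %W
      %Z = fadd reassoc nsz float %Y, 1.200000e+01
      ret float %Z
    }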
fast-ReassociateVector.ll
    22: ; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc <4 x float> [[A:%.*]], [[C:%.*]]
    23: ; CHECK-NEXT: [[MUL1:%.*]] = fmul reassoc <4 x float> [[B:%.*]], [[C]]
    24: ; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc <4 x float> [[MUL]], [[MUL1]]
    27: %mul = fmul reassoc <4 x float> %a, %c
    28: %mul1 = fmul reassoc <4 x float> %b, %c
    29: %add = fadd reassoc <4 x float> %mul, %mul1
    54: ; CHECK-NEXT: [[T0:%.*]] = fmul reassoc <2 x float> [[A:%.*]], [[B:%.*]]
    55: ; CHECK-NEXT: [[T1:%.*]] = fmul reassoc <2 x float> [[A]], [[T0]]
    56: ; CHECK-NEXT: [[T2:%.*]] = fmul reassoc <2 x float> [[A]], [[C:%.*]]
    57: ; CHECK-NEXT: [[T3:%.*]] = fmul reassoc <2 x float> [[A]], [[T2]]
    [all …]
fast-MissedTree.ll
    15: ; Both 'reassoc' and 'nsz' are required.
    18: ; CHECK-NEXT: [[Z:%.*]] = fadd reassoc nsz float %A, %B
    21: %W = fadd reassoc nsz float %B, -5.0
    22: %Y = fadd reassoc nsz float %A, 5.0
    23: %Z = fadd reassoc nsz float %W, %Y
    27: ; Verify the fold is not done with only 'reassoc' ('nsz' is required).
    30: ; CHECK-NEXT: [[W:%.*]] = fadd reassoc float %B, -5.000000e+00
    31: ; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float %A, 5.000000e+00
    32: ; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[W]]
    35: %W = fadd reassoc float %B, -5.0
    [all …]
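A sketch of the pattern behind these matches (function name illustrative): the -5 and +5 cancel, so (B - 5) + (A + 5) folds to A + B, again only when 'nsz' accompanies 'reassoc'.

    define float @opposite_constants(float %A, float %B) {
      ; expected with reassoc+nsz: %Z = fadd reassoc nsz float %A, %B
      %W = fadd reassoc nsz float %B, -5.0
      %Y = fadd reassoc nsz float %A, 5.0
      %Z = fadd reassoc nsz float %W, %Y
      ret float %Z
    }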
fast-AgressiveSubMove.ll
    30: ; CHECK-NEXT: [[X:%.*]] = fadd reassoc float %A, 1.000000e+00
    31: ; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float %A, 1.000000e+00
    32: ; CHECK-NEXT: [[R:%.*]] = fsub reassoc float [[X]], [[Y]]
    35: %X = fadd reassoc float 1.000000e+00, %A
    36: %Y = fadd reassoc float 1.000000e+00, %A
    37: %r = fsub reassoc float %X, %Y
mixed-fast-nonfast-fp.ll
    24: ; CHECK-NEXT: [[MUL1:%.*]] = fmul reassoc float %a, %c
    27: ; CHECK-NEXT: [[MUL4:%.*]] = fmul reassoc float %a, %c
    29: ; CHECK-NEXT: [[ADD2:%.*]] = fadd reassoc float [[MUL2]], [[MUL4]]
    33: %mul1 = fmul reassoc float %a, %c
    36: %mul4 = fmul reassoc float %a, %c
    38: %add2 = fadd reassoc float %mul4, %mul2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
fdiv-cos-sin.ll
    20: ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc double @llvm.sin.f64(double [[A]])
    25: %2 = call reassoc double @llvm.sin.f64(double %a)
    32: ; CHECK-NEXT: [[TAN:%.*]] = call reassoc double @tan(double [[A:%.*]]) #1
    33: ; CHECK-NEXT: [[TMP1:%.*]] = fdiv reassoc double 1.000000e+00, [[TAN]]
    38: %div = fdiv reassoc double %1, %2
    44: ; CHECK-NEXT: [[TAN:%.*]] = call reassoc double @tan(double [[A:%.*]]) #1
    45: ; CHECK-NEXT: [[TMP1:%.*]] = fdiv reassoc double 1.000000e+00, [[TAN]]
    48: %1 = call reassoc double @llvm.cos.f64(double %a)
    50: %div = fdiv reassoc double %1, %2
    56: ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc double @llvm.cos.f64(double [[A:%.*]])
    [all …]
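A reconstructed input for the fold these CHECK lines describe (function name and opt invocation illustrative): under 'reassoc', InstCombine turns cos(a)/sin(a) into 1.0 divided by a call to the libm @tan.

    ; run with: opt -instcombine -S < example.ll
    declare double @llvm.cos.f64(double)
    declare double @llvm.sin.f64(double)

    define double @cos_over_sin(double %a) {
      ; expected: %tan = call reassoc double @tan(double %a)
      ;           %div = fdiv reassoc double 1.0, %tan
      %1 = call reassoc double @llvm.cos.f64(double %a)
      %2 = call reassoc double @llvm.sin.f64(double %a)
      %div = fdiv reassoc double %1, %2
      ret double %div
    }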
fdiv-sin-cos.ll
    20: ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc double @llvm.cos.f64(double [[A]])
    25: %2 = call reassoc double @llvm.cos.f64(double %a)
    32: ; CHECK-NEXT: [[TAN:%.*]] = call reassoc double @tan(double [[A:%.*]]) #1
    37: %div = fdiv reassoc double %1, %2
    43: ; CHECK-NEXT: [[TAN:%.*]] = call reassoc double @tan(double [[A:%.*]]) #1
    46: %1 = call reassoc double @llvm.sin.f64(double %a)
    48: %div = fdiv reassoc double %1, %2
    54: ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc double @llvm.sin.f64(double [[A:%.*]])
    55: ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc double @llvm.cos.f64(double [[A]])
    56: ; CHECK-NEXT: [[DIV:%.*]] = fdiv reassoc double [[TMP1]], [[TMP2]]
    [all …]
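The mirror image of the previous file (names illustrative): sin(a)/cos(a) becomes a single call to @tan(a) when the division carries 'reassoc'.

    declare double @llvm.sin.f64(double)
    declare double @llvm.cos.f64(double)

    define double @sin_over_cos(double %a) {
      ; expected: the fdiv is replaced by
      ;           call reassoc double @tan(double %a)
      %1 = call reassoc double @llvm.sin.f64(double %a)
      %2 = call reassoc double @llvm.cos.f64(double %a)
      %div = fdiv reassoc double %1, %2
      ret double %div
    }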
fast-math.ll
    53: ; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
    56: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz double [[F1:%.*]], 6.000000e+00
    59: %t1 = fmul reassoc nsz double 5.000000e+00, %f1
    60: %t2 = fadd reassoc nsz double %f1, %t1
    67: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc double [[F1:%.*]], 5.000000e+00
    68: ; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc double [[TMP1]], [[F1]]
    71: %t1 = fmul reassoc double 5.000000e+00, %f1
    72: %t2 = fadd reassoc double %f1, %t1
    89: ; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
    92: ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[F1:%.*]], [[F2:%.*]]
    [all …]
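A reduction of the pair of tests matched here (function name illustrative): with 'reassoc nsz' InstCombine folds 5.0*f1 + f1 into f1*6.0; the matches at file lines 67-68 show that with 'reassoc' alone the fmul/fadd pair currently survives, even though the file notes 'nsz' is not technically required.

    define double @factor_add_mul(double %f1) {
      ; expected with reassoc+nsz: fmul reassoc nsz double %f1, 6.0
      %t1 = fmul reassoc nsz double 5.000000e+00, %f1
      %t2 = fadd reassoc nsz double %f1, %t1
      ret double %t2
    }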
2006-10-26-VectorReassoc.ll
    27: ; Verify this folds with 'reassoc' and 'nsz' ('nsz' not technically required)
    30: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz <4 x float> [[V:%.*]], <float 1.000000e+00, float…
    32: …%Y = fmul reassoc nsz <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00…
    33: …%Z = fmul reassoc nsz <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+0…
    41: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc <4 x float> [[V:%.*]], <float 1.000000e+00, float 2.0…
    42: ; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc <4 x float> [[TMP1]], <float 1.000000e+00, float 2.00…
    44: …%Y = fmul reassoc <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, fl…
    45: …%Z = fmul reassoc <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, f…
    73: ; Verify this folds with 'reassoc' and 'nsz' ('nsz' not technically required)
    76: ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz <4 x float> [[V:%.*]], <float 2.000000e+00, float…
    [all …]
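The shape of the test, with simpler illustrative constants (the file's own vectors are truncated above): two multiplies by constant vectors combine into one fmul by the elementwise product when 'reassoc' and 'nsz' are set, while the matches at 41-42 show the reassoc-only variant keeping both multiplies.

    define <4 x float> @merge_constant_fmuls(<4 x float> %V) {
      ; expected with reassoc+nsz: one fmul by <2.0, 4.0, 6.0, 8.0>
      %Y = fmul reassoc nsz <4 x float> %V, <float 1.0, float 2.0, float 3.0, float 4.0>
      %Z = fmul reassoc nsz <4 x float> %Y, <float 2.0, float 2.0, float 2.0, float 2.0>
      ret <4 x float> %Z
    }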
fmul-sqrt.ll
    43: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nnan double [[A:%.*]], [[B:%.*]]
    44: ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc nnan double @llvm.sqrt.f64(double [[TMP1]])
    49: %mul = fmul reassoc nnan double %1, %2
    60: ; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP1]], [[TMP2]]
    65: %mul = fmul reassoc double %1, %2
    70: ; 'reassoc nnan' on the fmuls is all that is required, but check propagation of other FMF.
    74: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nnan arcp double [[A:%.*]], [[B:%.*]]
    75: ; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc nnan double [[TMP1]], [[C:%.*]]
    76: ; CHECK-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf double [[TMP2]], [[D:%.*]]
    77: ; CHECK-NEXT: [[TMP4:%.*]] = call reassoc nnan ninf double @llvm.sqrt.f64(double [[TMP3]])
    [all …]
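A sketch of the fold named by the comment at file line 70 (function name illustrative): sqrt(a)*sqrt(b) becomes sqrt(a*b). 'nnan' matters because for a < 0 and b < 0 the left side is NaN*NaN while sqrt(a*b) is a real number.

    declare double @llvm.sqrt.f64(double)

    define double @sqrt_times_sqrt(double %a, double %b) {
      ; expected: %m = fmul reassoc nnan double %a, %b
      ;           call reassoc nnan double @llvm.sqrt.f64(double %m)
      %1 = call double @llvm.sqrt.f64(double %a)
      %2 = call double @llvm.sqrt.f64(double %b)
      %mul = fmul reassoc nnan double %1, %2
      ret double %mul
    }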
fmul.ll
    295: ; The transform only requires 'reassoc', but test other FMF in
    300: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc float [[X:%.*]], [[X]]
    301: ; CHECK-NEXT: [[MUL2:%.*]] = fmul reassoc float [[TMP1]], [[Y:%.*]]
    305: %mul2 = fmul reassoc float %mul1, %x
    327: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nnan float [[X]], [[X]]
    328: ; CHECK-NEXT: [[MUL2:%.*]] = fmul reassoc nnan float [[TMP1]], [[Y:%.*]]
    333: %mul2 = fmul reassoc nnan float %x, %mul1
    342: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc ninf float [[X]], [[X]]
    343: ; CHECK-NEXT: [[MUL2:%.*]] = fmul reassoc ninf float [[TMP1]], [[Y:%.*]]
    348: %mul2 = fmul reassoc ninf float %x, %mul1
    [all …]
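A minimal version of the transform these matches check (function name illustrative; per the file's comment the transform only requires 'reassoc', here placed on the final multiply as in the match at file line 305): (x*y)*x is reordered into (x*x)*y.

    define float @reorder_to_square(float %x, float %y) {
      ; expected: %sq = fmul reassoc float %x, %x
      ;           %mul2 = fmul reassoc float %sq, %y
      %mul1 = fmul float %x, %y
      %mul2 = fmul reassoc float %mul1, %x
      ret float %mul2
    }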
fdiv.ll
    138: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc arcp float [[Y:%.*]], [[Z:%.*]]
    139: ; CHECK-NEXT: [[DIV2:%.*]] = fdiv reassoc arcp float [[X:%.*]], [[TMP1]]
    143: %div2 = fdiv arcp reassoc float %div1, %z
    151: ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc arcp <2 x float> [[Y:%.*]], [[Z:%.*]]
    152: ; CHECK-NEXT: [[DIV2:%.*]] = fdiv reassoc arcp <2 x float> [[TMP1]], [[X:%.*]]
    156: %div2 = fdiv arcp reassoc <2 x float> %z, %div1
    290: ; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan float 1.000000e+00, [[Y:%.*]]
    294: %d = fdiv nnan reassoc float %x, %m
    303: ; CHECK-NEXT: [[D:%.*]] = fdiv reassoc float [[X]], [[M]]
    307: %d = fdiv reassoc float %x, %m
    [all …]
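Reconstructed from the matches at 138-143 (function name illustrative): with both 'reassoc' and 'arcp', InstCombine rewrites (x/y)/z as x/(y*z), trading a division for a multiplication.

    define float @div_of_div(float %x, float %y, float %z) {
      ; expected: %t = fmul reassoc arcp float %y, %z
      ;           %div2 = fdiv reassoc arcp float %x, %t
      %div1 = fdiv float %x, %y
      %div2 = fdiv arcp reassoc float %div1, %z
      ret float %div2
    }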
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstSimplify/
log-exp-intrinsic.ll
    14: %2 = call reassoc double @llvm.log.f64(double %1)
    20: ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc double @llvm.exp.f64(double [[A:%.*]])
    24: %1 = call reassoc double @llvm.exp.f64(double %a)
    49: %2 = call reassoc double @llvm.log.f64(double %1)
    51: %4 = call reassoc double @llvm.log.f64(double %3)
    60: %2 = call reassoc double @llvm.log2.f64(double %1)
    66: ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc double @llvm.exp2.f64(double [[A:%.*]])
    70: %1 = call reassoc double @llvm.exp2.f64(double %a)
    95: %2 = call reassoc double @llvm.log2.f64(double %1)
    97: %4 = call reassoc double @llvm.log2.f64(double %3)
    [all …]
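A sketch of the inverse-pair simplification these matches exercise (function name and opt invocation illustrative): with 'reassoc', InstSimplify cancels log(exp(a)) to a, and likewise log2(exp2(a)).

    ; run with: opt -instsimplify -S < example.ll
    declare double @llvm.exp.f64(double)
    declare double @llvm.log.f64(double)

    define double @log_of_exp(double %a) {
      ; expected: ret double %a
      %1 = call reassoc double @llvm.exp.f64(double %a)
      %2 = call reassoc double @llvm.log.f64(double %1)
      ret double %2
    }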
fdiv.ll
    25: %d = fdiv reassoc nnan double %m, %y
    34: ; CHECK-NEXT: [[D:%.*]] = fdiv reassoc double [[M]], %y
    38: %d = fdiv reassoc double %m, %y
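The pattern behind these three matches, as one function (name illustrative): (x*y)/y simplifies to x when the fdiv carries 'reassoc nnan'; the match at file line 34 shows the division surviving with 'reassoc' alone.

    define double @mul_then_div(double %x, double %y) {
      ; expected with reassoc+nnan: ret double %x
      %m = fmul double %x, %y
      %d = fdiv reassoc nnan double %m, %y
      ret double %d
    }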
fast-math.ll
    360: %mul = fmul reassoc nnan nsz double %sqrt, %sqrt
    380: ; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc nnan double [[SQRT]], [[SQRT]]
    384: %mul = fmul reassoc nnan double %sqrt, %sqrt
    391: ; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc nsz double [[SQRT]], [[SQRT]]
    395: %mul = fmul reassoc nsz double %sqrt, %sqrt
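A reassembled sketch (function name illustrative): sqrt(x)*sqrt(x) simplifies to x only under all of 'reassoc nnan nsz'; the matches at file lines 380 and 391 show the fmul kept when either 'nsz' or 'nnan' is dropped.

    declare double @llvm.sqrt.f64(double)

    define double @sqrt_squared(double %x) {
      ; expected: ret double %x
      %sqrt = call double @llvm.sqrt.f64(double %x)
      %mul = fmul reassoc nnan nsz double %sqrt, %sqrt
      ret double %mul
    }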
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
sqrt-fastmath-mir.ll
    34: ; CHECK: %3:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %0, %1
    36: ; CHECK: %5:fr32 = nnan ninf nsz arcp contract afn reassoc VFMADD213SSr %1, killed %3, %4
    38: ; CHECK: %7:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %1, %6
    39: ; CHECK: %8:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr killed %7, killed %5
    40: ; CHECK: %9:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %0, %8
    41: ; CHECK: %10:fr32 = nnan ninf nsz arcp contract afn reassoc VFMADD213SSr %8, killed %9, %4
    42: ; CHECK: %11:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr %8, %6
    43: ; CHECK: %12:fr32 = nnan ninf nsz arcp contract afn reassoc VMULSSrr killed %11, killed %10
fmf-propagation.ll
    14: ; CHECK-NEXT: t11: f32 = fadd reassoc t10, t4
    15: ; CHECK-NEXT: t12: f32 = fadd nnan ninf nsz arcp contract afn reassoc t11, t4
    26: %f7 = fadd reassoc float %f6, %y
fp-fold.ll
    43: %r = fadd reassoc nsz float %sum, 12.0
    152: %mul = fmul reassoc float %x, 9.0
    153: %r = fmul reassoc float %mul, 4.0
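A sketch of the constant-merging fold the last two matches exercise (function name illustrative; as a CodeGen/X86 test this is checked through llc rather than opt): under 'reassoc' the two constant multiplies combine.

    define float @combine_constant_fmuls(float %x) {
      ; expected: a single multiply, x * 36.0
      %mul = fmul reassoc float %x, 9.0
      %r = fmul reassoc float %mul, 4.0
      ret float %r
    }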
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/MIR/X86/
fastmath.mir
    25: ; CHECK: %7:fr32 = reassoc VMULSSrr %6, %6
    26: %7:fr32 = reassoc VMULSSrr %6, %6
    27: ; CHECK: %8:fr32 = nsz arcp contract afn reassoc VMULSSrr %7, %7
    28: %8:fr32 = nsz arcp contract afn reassoc VMULSSrr %7, %7
    29: ; CHECK: %9:fr32 = contract afn reassoc VMULSSrr %8, %8
    30: %9:fr32 = contract afn reassoc VMULSSrr %8, %8
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
fmf-propagation.ll
    64: ; FMFDEBUG: fma reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+}}
    80: %add = fadd reassoc float %mul, %z
    87: ; FMFDEBUG: fma reassoc {{t[0-9]+}}, {{t[0-9]+}}
    102: %mul = fmul reassoc float %x, %y
    103: %add = fadd reassoc float %mul, %z
    110: ; FMFDEBUG: fma nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+…
    133: ; FMFDEBUG: fma nnan ninf nsz arcp contract afn reassoc {{t[0-9]+}}, {{t[0-9]+}}, {{t[0-9]+…
    157: ; FMFDEBUG: fmul reassoc {{t[0-9]+}},
    161: ; GLOBALDEBUG: fmul reassoc {{t[0-9]+}}
    181: %fma = call reassoc float @llvm.fma.f32(float %x, float 7.0, float %mul)
    [all …]
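Reconstructed from the matches at 87-103 (function name illustrative): the FMFDEBUG output above shows an fmul and fadd that both carry 'reassoc' being fused into a single fma node during PowerPC instruction selection.

    define float @fmul_fadd_reassoc(float %x, float %y, float %z) {
      ; the selection DAG is expected to form: fma reassoc x, y, z
      %mul = fmul reassoc float %x, %y
      %add = fadd reassoc float %mul, %z
      ret float %add
    }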
/external/swiftshader/third_party/llvm-7.0/llvm/test/Assembler/
fast-math-flags.ll
    91: ; CHECK: @reassoc(
    92: define float @reassoc(float %x, float %y) {
    93: ; CHECK: %a = fsub reassoc float %x, %y
    94: %a = fsub reassoc float %x, %y
    95: ; CHECK: %b = fmul reassoc float %x, %y
    96: %b = fmul reassoc float %x, %y
    97: ; CHECK: %c = call reassoc float @foo(float %b)
    98: %c = call reassoc float @foo(float %b)
    159: ; CHECK: %a_vec = fadd reassoc nnan <3 x float> %vec, %vec
    160: %a_vec = fadd reassoc nnan <3 x float> %vec, %vec
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
buffer-schedule.ll
    30: %tmp22 = fadd reassoc nnan arcp contract float %tmp21, 1.000000e+00
    34: %tmp27 = fadd reassoc nnan arcp contract float %tmp26, 1.000000e+00
    38: %tmp32 = fadd reassoc nnan arcp contract float %tmp31, 1.000000e+00
    42: %tmp37 = fadd reassoc nnan arcp contract float %tmp36, 1.000000e+00
couldnt-join-subrange-3.mir
    65: %17 = fmul reassoc nnan arcp contract <3 x float> %16, zeroinitializer
    74: %26 = fmul reassoc nnan arcp contract <3 x float> %25, zeroinitializer
    75: %27 = fadd reassoc nnan arcp contract <3 x float> zeroinitializer, %26
    76: %28 = fmul reassoc nnan arcp contract <3 x float> %27, zeroinitializer
    77: %29 = fadd reassoc nnan arcp contract <3 x float> %28, zeroinitializer
    86: %38 = fmul reassoc nnan arcp contract <3 x float> %37, zeroinitializer
    87: %39 = fadd reassoc nnan arcp contract <3 x float> %38, zeroinitializer
    309: … %42:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, killed %41, 0, 0, 0, 0, 0, 0, implicit $exec
    310: … %43:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, killed %42, 0, 0, 0, 0, 0, 0, implicit $exec
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
vecreduce-propagate-sd-flags.ll
    14: ; CHECK: Legalizing node: t26: v2f64 = fmaxnum nnan reassoc [[VTWO]], [[VTWO]]
    27: %4 = call nnan reassoc double @llvm.experimental.vector.reduce.fmax.f64.v4f64(<4 x double> %3)