Searched refs:fabs (Results 1 – 25 of 1525) sorted by relevance
/external/llvm/test/Analysis/CostModel/AMDGPU/ |
D | fabs.ll |
    4   ; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
    7   %fabs = call float @llvm.fabs.f32(float %vec) #1
    8   store float %fabs, float addrspace(1)* %out
    13  ; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32
    16  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1
    17  store <2 x float> %fabs, <2 x float> addrspace(1)* %out
    22  ; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32
    25  %fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1
    26  store <3 x float> %fabs, <3 x float> addrspace(1)* %out
    31  ; CHECK: estimated cost of 0 for {{.*}} call double @llvm.fabs.f64
    [all …]
|
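A minimal sketch of how a cost-model test like the one above is usually structured: the cost-model analysis is printed for a module that calls the fabs intrinsic, and FileCheck asserts the reported cost. The RUN invocation, triple, and function name below are assumptions for illustration, not lines taken from the file.

    ; RUN: opt -cost-model -analyze -mtriple=amdgcn-- < %s | FileCheck %s
    declare float @llvm.fabs.f32(float)

    define float @fabs_f32_cost(float %x) {
      ; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
      %fabs = call float @llvm.fabs.f32(float %x)
      ret float %fabs
    }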
/external/llvm-project/llvm/test/Analysis/CostModel/AMDGPU/ |
D | fabs.ll |
    5   ; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
    8   %fabs = call float @llvm.fabs.f32(float %vec) #1
    9   store float %fabs, float addrspace(1)* %out
    14  ; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32
    17  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1
    18  store <2 x float> %fabs, <2 x float> addrspace(1)* %out
    23  ; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32
    26  %fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1
    27  store <3 x float> %fabs, <3 x float> addrspace(1)* %out
    32  ; CHECK: estimated cost of 0 for {{.*}} call <5 x float> @llvm.fabs.v5f32
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | fabs.ll |
    6   declare float @llvm.fabs.f32(float)
    7   declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
    8   declare double @llvm.fabs.f64(double)
    9   declare fp128 @llvm.fabs.f128(fp128)
    12  declare double @fabs(double)
    21  ; CHECK-NEXT: [[FABSF:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
    30  ; CHECK-NEXT: [[FABS:%.*]] = call double @llvm.fabs.f64(double [[X:%.*]])
    33  %fabs = tail call double @fabs(double %x)
    34  ret double %fabs
    39  ; CHECK-NEXT: [[FABSL:%.*]] = call fp128 @llvm.fabs.f128(fp128 [[X:%.*]])
    [all …]
|
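Those CHECK-NEXT lines document a canonicalization: a call to the C library's fabs/fabsf/fabsl is rewritten to the corresponding llvm.fabs.* intrinsic, which the rest of the optimizer and the backends handle uniformly. A before/after sketch (function name mine, not from the file):

    declare double @fabs(double)

    define double @replace_fabs_libcall(double %x) {
      ; before InstCombine: a libcall
      %fabs = tail call double @fabs(double %x)
      ; after InstCombine this becomes:
      ;   %fabs = call double @llvm.fabs.f64(double %x)
      ret double %fabs
    }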
/external/llvm/test/CodeGen/AMDGPU/ |
D | fabs.f64.ll |
    5   declare double @fabs(double) readnone
    6   declare double @llvm.fabs.f64(double) readnone
    7   declare <2 x double> @llvm.fabs.v2f64(<2 x double>) readnone
    8   declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
    18  %fabs = call double @llvm.fabs.f64(double %val)
    19  store double %fabs, double addrspace(1)* %out
    28  %fabs = call double @llvm.fabs.f64(double %in)
    29  store double %fabs, double addrspace(1)* %out
    38  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
    39  store <2 x double> %fabs, <2 x double> addrspace(1)* %out
    [all …]
|
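For reference, the identity those f64 tests lean on, as a sketch of my own rather than test content: |x| for a double is just the same encoding with bit 63 cleared, so fabs can be implemented with one integer AND and no floating-point instruction.

    define double @fabs_f64_as_mask(double %x) {
      %bits    = bitcast double %x to i64
      %cleared = and i64 %bits, 9223372036854775807   ; 0x7FFFFFFFFFFFFFFF clears the sign bit
      %abs     = bitcast i64 %cleared to double        ; same value as llvm.fabs.f64(%x), NaNs included
      ret double %abs
    }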
D | fabs.ll |
    7   ; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
    18  %fabs = call float @fabs(float %bc)
    19  store float %fabs, float addrspace(1)* %out
    31  %fabs = call float @llvm.fabs.f32(float %bc)
    32  store float %fabs, float addrspace(1)* %out
    41  %fabs = call float @llvm.fabs.f32(float %in)
    42  store float %fabs, float addrspace(1)* %out
    53  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
    54  store <2 x float> %fabs, <2 x float> addrspace(1)* %out
    69  %fabs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
    [all …]
|
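The comment matched at line 7 of that file names the rewrite being tested; spelled out as IR it looks like the sketch below (function name mine). fabs of a float that was just bitcast from an integer becomes an integer AND that clears the sign bit.

    define float @fabs_of_bitcast(i32 %a) {
      %masked = and i32 %a, 2147483647     ; 0x7FFFFFFF
      %abs    = bitcast i32 %masked to float
      ret float %abs                        ; equals llvm.fabs.f32(bitcast i32 %a to float)
    }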
D | fneg-fabs.ll |
    9   %fabs = call float @llvm.fabs.f32(float %x)
    10  %fsub = fsub float -0.000000e+00, %fabs
    21  %fabs = call float @llvm.fabs.f32(float %x)
    22  %fsub = fsub float -0.000000e+00, %fabs
    29  ; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
    40  %fabs = call float @llvm.fabs.f32(float %bc)
    41  %fsub = fsub float -0.000000e+00, %fabs
    54  %fabs = call float @fabs(float %bc)
    55  %fsub = fsub float -0.000000e+00, %fabs
    63  %fabs = call float @llvm.fabs.f32(float %in)
    [all …]
|
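These fneg-fabs tests pair llvm.fabs with the fsub -0.0 idiom the older files use for negation. At the bit level -(|x|) simply forces the sign bit on, so the pair can lower to a single OR with the sign mask; a sketch of both spellings (function names mine, not from the file):

    declare float @llvm.fabs.f32(float)

    define float @fneg_fabs_f32(float %x) {
      %fabs = call float @llvm.fabs.f32(float %x)
      %fsub = fsub float -0.000000e+00, %fabs      ; negation as written in these tests
      ret float %fsub
    }

    define float @fneg_fabs_f32_bits(float %x) {
      %bits = bitcast float %x to i32
      %set  = or i32 %bits, 2147483648             ; 0x80000000 sets the sign bit
      %r    = bitcast i32 %set to float            ; bit-level equivalent of -(|x|)
      ret float %r
    }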
D | fneg-fabs.f64.ll |
    4   ; FIXME: Check something here. Currently it seems fabs + fneg aren't
    10  %fabs = call double @llvm.fabs.f64(double %x)
    11  %fsub = fsub double -0.000000e+00, %fabs
    20  %fabs = call double @llvm.fabs.f64(double %x)
    21  %fsub = fsub double -0.000000e+00, %fabs
    30  %fabs = call double @llvm.fabs.f64(double %x)
    31  %fsub = fsub double -0.000000e+00, %fabs
    40  %fabs = call double @llvm.fabs.f64(double %bc)
    41  %fsub = fsub double -0.000000e+00, %fabs
    51  %fabs = call double @fabs(double %bc)
    [all …]
|
D | fp-classify.ll |
    4   declare float @llvm.fabs.f32(float) #1
    5   declare double @llvm.fabs.f64(double) #1
    13  %fabs = tail call float @llvm.fabs.f32(float %x) #1
    14  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
    24  %fabs = tail call float @llvm.fabs.f32(float %x) #1
    25  %cmp = fcmp ueq float %fabs, 0x7FF0000000000000
    35  %fabs = tail call float @llvm.fabs.f32(float %x) #1
    36  %cmp = fcmp oeq float %fabs, 0xFFF0000000000000
    50  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
    51  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
    [all …]
|
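The fp-classify patterns use fabs to fold the two infinities together before comparing: once the sign is stripped, a single compare against +infinity answers "is x infinite". A sketch of the scalar form (function name mine; the hex constant is LLVM IR's double-style spelling of float +inf, exactly as in the file):

    declare float @llvm.fabs.f32(float)

    define i1 @is_infinite_f32(float %x) {
      %fabs = call float @llvm.fabs.f32(float %x)
      %cmp  = fcmp oeq float %fabs, 0x7FF0000000000000   ; |x| == +inf
      ret i1 %cmp
    }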
D | llvm.amdgcn.frexp.mant.ll |
    4   declare float @llvm.fabs.f32(float) #0
    5   declare double @llvm.fabs.f64(double) #0
    20  %fabs.src = call float @llvm.fabs.f32(float %src)
    21  %frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fabs.src)
    29  %fabs.src = call float @llvm.fabs.f32(float %src)
    30  %fneg.fabs.src = fsub float -0.0, %fabs.src
    31  %frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fneg.fabs.src)
    47  %fabs.src = call double @llvm.fabs.f64(double %src)
    48  %frexp.mant = call double @llvm.amdgcn.frexp.mant.f64(double %fabs.src)
    56  %fabs.src = call double @llvm.fabs.f64(double %src)
    [all …]
|
D | llvm.amdgcn.frexp.exp.ll |
    4   declare float @llvm.fabs.f32(float) #0
    5   declare double @llvm.fabs.f64(double) #0
    20  %fabs.src = call float @llvm.fabs.f32(float %src)
    21  %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.f32(float %fabs.src)
    29  %fabs.src = call float @llvm.fabs.f32(float %src)
    30  %fneg.fabs.src = fsub float -0.0, %fabs.src
    31  %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.f32(float %fneg.fabs.src)
    47  %fabs.src = call double @llvm.fabs.f64(double %src)
    48  %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.f64(double %fabs.src)
    56  %fabs.src = call double @llvm.fabs.f64(double %src)
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | fneg-fabs.f16.ll |
    13  %fabs = call half @llvm.fabs.f16(half %x)
    14  %fsub = fsub half -0.0, %fabs
    31  %fabs = call half @llvm.fabs.f16(half %x)
    32  %fsub = fsub half -0.0, %fabs
    39  ; (fabs (f16 bitcast (i16 a))) => (f16 bitcast (and (i16 a), 0x7FFFFFFF))
    46  %fabs = call half @llvm.fabs.f16(half %bc)
    47  %fsub = fsub half -0.0, %fabs
    55  %fabs = call half @llvm.fabs.f16(half %in)
    56  %fsub = fsub half -0.0, %fabs
    65  %fabs = call half @llvm.fabs.f16(half %val)
    [all …]
|
D | fabs.f64.ll |
    5   declare double @fabs(double) readnone
    6   declare double @llvm.fabs.f64(double) readnone
    7   declare <2 x double> @llvm.fabs.v2f64(<2 x double>) readnone
    8   declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
    18  %fabs = call double @llvm.fabs.f64(double %val)
    19  store double %fabs, double addrspace(1)* %out
    28  %fabs = call double @llvm.fabs.f64(double %in)
    29  store double %fabs, double addrspace(1)* %out
    38  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
    39  store <2 x double> %fabs, <2 x double> addrspace(1)* %out
    [all …]
|
D | fneg-fabs.f64.ll |
    4   ; FIXME: Check something here. Currently it seems fabs + fneg aren't
    10  %fabs = call double @llvm.fabs.f64(double %x)
    11  %fsub = fsub double -0.000000e+00, %fabs
    20  %fabs = call double @llvm.fabs.f64(double %x)
    21  %fsub = fsub double -0.000000e+00, %fabs
    30  %fabs = call double @llvm.fabs.f64(double %x)
    31  %fsub = fsub double -0.000000e+00, %fabs
    40  %fabs = call double @llvm.fabs.f64(double %bc)
    41  %fsub = fsub double -0.000000e+00, %fabs
    51  %fabs = call double @fabs(double %bc)
    [all …]
|
D | fneg-fabs.ll |
    9   %fabs = call float @llvm.fabs.f32(float %x)
    10  %fsub = fsub float -0.000000e+00, %fabs
    21  %fabs = call float @llvm.fabs.f32(float %x)
    22  %fsub = fsub float -0.000000e+00, %fabs
    29  ; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
    41  %fabs = call float @llvm.fabs.f32(float %bc)
    42  %fsub = fsub float -0.000000e+00, %fabs
    55  %fabs = call float @fabs(float %bc)
    56  %fsub = fsub float -0.000000e+00, %fabs
    64  %fabs = call float @llvm.fabs.f32(float %in)
    [all …]
|
D | fabs.ll |
    7   ; (fabs (f32 bitcast (i32 a))) => (f32 bitcast (and (i32 a), 0x7FFFFFFF))
    18  %fabs = call float @fabs(float %bc)
    19  store float %fabs, float addrspace(1)* %out
    31  %fabs = call float @llvm.fabs.f32(float %bc)
    32  store float %fabs, float addrspace(1)* %out
    42  %fabs = call float @llvm.fabs.f32(float %in)
    43  store float %fabs, float addrspace(1)* %out
    54  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
    55  store <2 x float> %fabs, <2 x float> addrspace(1)* %out
    70  %fabs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %in)
    [all …]
|
D | fp-classify.ll |
    4   declare float @llvm.fabs.f32(float) #1
    5   declare double @llvm.fabs.f64(double) #1
    13  %fabs = tail call float @llvm.fabs.f32(float %x) #1
    14  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
    24  %fabs = tail call float @llvm.fabs.f32(float %x) #1
    25  %cmp = fcmp ueq float %fabs, 0x7FF0000000000000
    35  %fabs = tail call float @llvm.fabs.f32(float %x) #1
    36  %cmp = fcmp oeq float %fabs, 0xFFF0000000000000
    50  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
    51  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
    [all …]
|
D | llvm.amdgcn.frexp.mant.ll |
    4   declare float @llvm.fabs.f32(float) #0
    5   declare double @llvm.fabs.f64(double) #0
    20  %fabs.src = call float @llvm.fabs.f32(float %src)
    21  %frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fabs.src)
    29  %fabs.src = call float @llvm.fabs.f32(float %src)
    30  %fneg.fabs.src = fsub float -0.0, %fabs.src
    31  %frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fneg.fabs.src)
    47  %fabs.src = call double @llvm.fabs.f64(double %src)
    48  %frexp.mant = call double @llvm.amdgcn.frexp.mant.f64(double %fabs.src)
    56  %fabs.src = call double @llvm.fabs.f64(double %src)
    [all …]
|
D | llvm.amdgcn.frexp.exp.ll |
    4   declare float @llvm.fabs.f32(float) #0
    5   declare double @llvm.fabs.f64(double) #0
    20  %fabs.src = call float @llvm.fabs.f32(float %src)
    21  %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float %fabs.src)
    29  %fabs.src = call float @llvm.fabs.f32(float %src)
    30  %fneg.fabs.src = fsub float -0.0, %fabs.src
    31  %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float %fneg.fabs.src)
    47  %fabs.src = call double @llvm.fabs.f64(double %src)
    48  %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f64(double %fabs.src)
    56  %fabs.src = call double @llvm.fabs.f64(double %src)
    [all …]
|
D | fabs.f16.ll |
    6   ; (fabs (f16 bitcast (i16 a))) => (f16 bitcast (and (i16 a), 0x7FFFFFFF))
    16  %fabs = call half @llvm.fabs.f16(half %bc)
    17  store half %fabs, half addrspace(1)* %out
    27  %fabs = call half @llvm.fabs.f16(half %in)
    28  store half %fabs, half addrspace(1)* %out
    36  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
    37  store <2 x half> %fabs, <2 x half> addrspace(1)* %out
    50  %fabs = call <4 x half> @llvm.fabs.v4f16(<4 x half> %in)
    51  store <4 x half> %fabs, <4 x half> addrspace(1)* %out
    70  %fabs = call half @llvm.fabs.f16(half %in0)
    [all …]
|
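The same sign-mask identity at half precision, as a sketch of my own rather than test content: bit 15 is the f16 sign bit, so ANDing the raw i16 with 0x7FFF yields |x|; the <2 x half> and <4 x half> cases above apply the same mask per 16-bit lane.

    define half @fabs_f16_as_mask(i16 %a) {
      %masked = and i16 %a, 32767          ; 0x7FFF
      %abs    = bitcast i16 %masked to half
      ret half %abs                         ; equals llvm.fabs.f16(bitcast i16 %a to half)
    }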
/external/llvm/test/CodeGen/X86/ |
D | fnabs.ll |
    12  %fabs = tail call float @fabsf(float %a) #1
    13  %fsub = fsub float -0.0, %fabs
    23  %fabs = tail call float @fabsf(float %a) #1
    24  %fsub = fsub float -0.0, %fabs
    25  %fmul = fmul float %fsub, %fabs
    33  %fabs = tail call <4 x float> @llvm.fabs.v4f32(< 4 x float> %a) #1
    34  %fsub = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %fabs
    44  %fabs = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a) #1
    45  %fsub = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %fabs
    46  %fmul = fmul <4 x float> %fsub, %fabs
    [all …]
|
D | fabs.ll |
    1   ; Make sure this testcase codegens to the fabs instruction, not a call to fabsf
    17  ; CHECK: {{^[ \t]+fabs$}}
    18  ; UNSAFE: {{^[ \t]+fabs$}}
    20  ; CHECK-NOT: fabs
    21  ; UNSAFE-NOT: fabs
    33  ; fabs is not used here.
    34  ; CHECK-NOT: fabs
    35  ; NOOPT-NOT: fabs
    37  ; UNSAFE: {{^[ \t]+fabs$}}
    39  ; UNSAFE-NOT: fabs
    [all …]
|
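That X86 test's first comment states its purpose: a plain fabsf call should be selected as the hardware absolute-value operation rather than left as a call into the C runtime. A minimal function exercising that expectation (a sketch; the function name is mine, and readnone mirrors how other tests above mark the libcall as side-effect free):

    declare float @fabsf(float) readnone

    define float @abs_via_libcall(float %x) {
      %r = tail call float @fabsf(float %x) readnone
      ret float %r        ; expected to codegen to an fabs-style instruction, not "call fabsf"
    }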
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | fnabs.ll |
    12  %fabs = tail call float @fabsf(float %a) #1
    13  %fsub = fsub float -0.0, %fabs
    23  %fabs = tail call float @fabsf(float %a) #1
    24  %fsub = fsub float -0.0, %fabs
    25  %fmul = fmul float %fsub, %fabs
    33  %fabs = tail call <4 x float> @llvm.fabs.v4f32(< 4 x float> %a) #1
    34  %fsub = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %fabs
    44  %fabs = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a) #1
    45  %fsub = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %fabs
    46  %fmul = fmul <4 x float> %fsub, %fabs
    [all …]
|
/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | floating-point-arithmetic.ll |
    223  declare float @llvm.fabs.f32(float)
    224  declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
    236  %fabs = call float @llvm.fabs.f32(float %select)
    237  ret float %fabs
    248  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %select)
    249  ret <2 x float> %fabs
    256  ; CHECK-NEXT: [[FABS:%.*]] = call float @llvm.fabs.f32(float [[SELECT]])
    261  %fabs = call float @llvm.fabs.f32(float %select)
    262  ret float %fabs
    269  ; CHECK-NEXT: [[FABS:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[SELECT]])
    [all …]
|
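Those InstSimplify cases check when fabs of a select can be dropped and when it must be kept, depending on whether the selected value is provably non-negative. The simplest instance of that reasoning is fabs applied to something that already cannot be negative, sketched below with fabs-of-fabs (my example, not one from the file):

    declare float @llvm.fabs.f32(float)

    define float @fabs_of_fabs(float %x) {
      %a = call float @llvm.fabs.f32(float %x)
      %b = call float @llvm.fabs.f32(float %a)   ; %a is already non-negative, so this folds to %a
      ret float %b
    }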
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | vec_abs.ll |
    14  %0 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %aa) #2
    18  declare <4 x float> @llvm.fabs.v4f32(<4 x float>) #1
    22  ; CHECK-NOVSX: fabs
    23  ; CHECK-NOVSX: fabs
    24  ; CHECK-NOVSX: fabs
    25  ; CHECK-NOVSX: fabs
    34  %0 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %aa) #2
    43  ; CHECK-NOVSX: fabs
    44  ; CHECK-NOVSX: fabs
    45  ; CHECK-NOVSX: fabs
    [all …]
|
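The repeated CHECK-NOVSX: fabs lines above match one scalar fabs per vector lane: without VSX the <4 x float> operation is scalarized, while a VSX target can keep it as a single vector absolute-value instruction. The IR driving both cases is just one intrinsic call, sketched here (function name mine):

    declare <4 x float> @llvm.fabs.v4f32(<4 x float>)

    define <4 x float> @vec_fabs(<4 x float> %aa) {
      %abs = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %aa)
      ret <4 x float> %abs
    }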
/external/llvm/test/CodeGen/PowerPC/ |
D | vec_abs.ll |
    14  %0 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %aa) #2
    18  declare <4 x float> @llvm.fabs.v4f32(<4 x float>) #1
    22  ; CHECK-NOVSX: fabs
    23  ; CHECK-NOVSX: fabs
    24  ; CHECK-NOVSX: fabs
    25  ; CHECK-NOVSX: fabs
    34  %0 = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %aa) #2
    43  ; CHECK-NOVSX: fabs
    44  ; CHECK-NOVSX: fabs
    45  ; CHECK-NOVSX: fabs
    [all …]
|