
Searched refs:ninf (Results 1 – 25 of 153) sorted by relevance


/external/icu/icu4c/source/test/intltest/
tsputil.cpp 37 double ninf = -uprv_getInfinity();
103 double ninf = -uprv_getInfinity(); in testMaxMin() local
111 maxMinTest(pinf, ninf, pinf, TRUE); in testMaxMin()
112 maxMinTest(pinf, ninf, ninf, FALSE); in testMaxMin()
121 maxMinTest(ninf, pzero, pzero, TRUE); in testMaxMin()
122 maxMinTest(ninf, pzero, ninf, FALSE); in testMaxMin()
123 maxMinTest(ninf, nzero, nzero, TRUE); in testMaxMin()
124 maxMinTest(ninf, nzero, ninf, FALSE); in testMaxMin()
129 maxMinTest(ninf, nan, nan, TRUE); in testMaxMin()
130 maxMinTest(ninf, nan, nan, FALSE); in testMaxMin()
[all …]
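
Note: the tsputil.cpp matches above exercise min/max behaviour around negative infinity. As a standalone illustration (using <cmath>/<limits> in place of ICU's uprv_getInfinity() and maxMinTest() helpers, which is an assumption about what they do), the same checks look roughly like this:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
        // Rough equivalent of "double ninf = -uprv_getInfinity();" in the ICU test.
        double pinf = std::numeric_limits<double>::infinity();
        double ninf = -pinf;

        // -inf is below every finite value, +inf and both zeros.
        assert(ninf < -0.0 && ninf < pinf);
        assert(std::fmax(pinf, ninf) == pinf);   // max(pinf, ninf) is pinf
        assert(std::fmin(ninf, +0.0) == ninf);   // min(ninf, pzero) is ninf
        return 0;
    }
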
/external/llvm-project/llvm/test/CodeGen/ARM/
pow.75.ll 11 ; CHECK: Combining: {{.*}}: f32 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f32<7.500000e-01>
12 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[X]]
13 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[SQRT]]
14 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
15 ; CHECK-NEXT: ... into: [[R]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
16 %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 7.5e-01)
21 ; CHECK: Combining: {{.*}}: f64 = fpow ninf nsz afn t2, ConstantFP:f64<7.500000e-01>
22 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[X]]
23 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[SQRT]]
24 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f64 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
[all …]
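
Note: the ARM pow.75.ll checks above (and the identical PowerPC, X86 and AArch64 copies further down) verify that, with ninf nsz afn set, the DAG combiner expands llvm.pow(x, 0.75) into sqrt(x) * sqrt(sqrt(x)). A minimal C++ sketch of the identity the combine relies on, valid for finite non-negative x:

    #include <cassert>
    #include <cmath>

    int main() {
        // x^0.75 == x^(1/2) * x^(1/4) == sqrt(x) * sqrt(sqrt(x))
        for (double x : {0.5, 1.0, 2.0, 100.0}) {
            double expanded = std::sqrt(x) * std::sqrt(std::sqrt(x));
            double direct   = std::pow(x, 0.75);
            assert(std::fabs(expanded - direct) <= 1e-12 * direct);  // agree up to rounding
        }
        return 0;
    }

The ninf flag is what licenses ignoring x == -inf (and nsz the sign of zero), where the two forms could diverge.
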
/external/llvm-project/llvm/test/CodeGen/PowerPC/
pow.75.ll 11 ; CHECK: Combining: {{.*}}: f32 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f32<7.500000e-01>
12 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[X]]
13 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[SQRT]]
14 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
15 ; CHECK-NEXT: ... into: [[R]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
16 %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 7.5e-01)
21 ; CHECK: Combining: {{.*}}: f64 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f64<7.500000e-01>
22 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[X]]
23 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[SQRT]]
24 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f64 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
[all …]
/external/llvm-project/llvm/test/CodeGen/X86/
pow.75.ll 11 ; CHECK: Combining: {{.*}}: f32 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f32<7.500000e-01>
12 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[X]]
13 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[SQRT]]
14 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
15 ; CHECK-NEXT: ... into: [[R]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
16 %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 7.5e-01)
21 ; CHECK: Combining: {{.*}}: f64 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f64<7.500000e-01>
22 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[X]]
23 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[SQRT]]
24 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f64 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
[all …]
sqrt-fastmath-mir.ll 26 ; CHECK: %3:fr32 = ninf nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
28 …; CHECK: %5:fr32 = ninf nofpexcept VFMADD213SSr [[VRSQRTSSr]], killed %3, [[VMOVSSrm_alt]], impl…
30 ; CHECK: %7:fr32 = ninf nofpexcept VMULSSrr [[VRSQRTSSr]], [[VMOVSSrm_alt1]], implicit $mxcsr
31 ; CHECK: %8:fr32 = ninf nofpexcept VMULSSrr killed %7, killed %5, implicit $mxcsr
32 ; CHECK: %9:fr32 = ninf nofpexcept VMULSSrr [[COPY]], %8, implicit $mxcsr
33 ; CHECK: %10:fr32 = ninf nofpexcept VFMADD213SSr %8, %9, [[VMOVSSrm_alt]], implicit $mxcsr
34 ; CHECK: %11:fr32 = ninf nofpexcept VMULSSrr %9, [[VMOVSSrm_alt1]], implicit $mxcsr
35 ; CHECK: %12:fr32 = ninf nofpexcept VMULSSrr killed %11, killed %10, implicit $mxcsr
47 %call = tail call ninf float @llvm.sqrt.f32(float %f)
71 ; CHECK: %3:fr32 = ninf nofpexcept VMULSSrr [[COPY]], [[VRSQRTSSr]], implicit $mxcsr
[all …]
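
Note: the sqrt-fastmath-mir.ll checks above show the ninf expansion of llvm.sqrt.f32 into a hardware reciprocal-square-root estimate (VRSQRTSSr) refined by Newton-Raphson steps (the VFMADD213SSr/VMULSSrr chain). A textbook sketch of that refinement, with a crude constant standing in for the hardware estimate (the generated code arranges the same arithmetic with different constants and FMAs):

    #include <cassert>
    #include <cmath>

    int main() {
        double x = 2.0;
        double r = 0.7;                           // rough estimate of 1/sqrt(x)
        for (int i = 0; i < 3; ++i)
            r = r * (1.5 - 0.5 * x * r * r);      // Newton-Raphson step for rsqrt
        double approx_sqrt = x * r;               // sqrt(x) == x * (1/sqrt(x))
        assert(std::fabs(approx_sqrt - std::sqrt(x)) < 1e-9);
        return 0;
    }
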
/external/llvm-project/llvm/test/CodeGen/AArch64/
pow.75.ll 11 ; CHECK: Combining: {{.*}}: f32 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f32<7.500000e-01>
12 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[X]]
13 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f32 = fsqrt ninf nsz afn [[SQRT]]
14 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
15 ; CHECK-NEXT: ... into: [[R]]: f32 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
16 %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 7.5e-01)
21 ; CHECK: Combining: {{.*}}: f64 = fpow ninf nsz afn [[X:t[0-9]+]], ConstantFP:f64<7.500000e-01>
22 ; CHECK-NEXT: Creating new node: [[SQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[X]]
23 ; CHECK-NEXT: Creating new node: [[SQRTSQRT:t[0-9]+]]: f64 = fsqrt ninf nsz afn [[SQRT]]
24 ; CHECK-NEXT: Creating new node: [[R:t[0-9]+]]: f64 = fmul ninf nsz afn [[SQRT]], [[SQRTSQRT]]
[all …]
/external/llvm-project/llvm/test/Transforms/InstCombine/
pow-exp.ll 216 ; CHECK-NEXT: [[MUL:%.*]] = fmul nnan ninf afn double [[E:%.*]], 0xBFE0776{{.*}}
217 ; CHECK-NEXT: [[EXP2:%.*]] = call nnan ninf afn double @exp2(double [[MUL]])
220 %call = tail call afn nnan ninf double @pow(double 0x3FE6666666666666, double %e)
236 ; CHECK-NEXT: [[MUL:%.*]] = fmul nnan ninf afn double [[E:%.*]], 0x4010952{{.*}}
237 ; CHECK-NEXT: [[EXP2:%.*]] = call nnan ninf afn double @exp2(double [[MUL]])
240 %call = tail call afn nnan ninf double @pow(double 1.770000e+01, double %e)
246 ; CHECK-NEXT: [[MUL:%.*]] = fmul nnan ninf afn double [[E:%.*]], 0x400AB0B5{{.*}}
247 ; CHECK-NEXT: [[EXP2:%.*]] = call nnan ninf afn double @exp2(double [[MUL]])
250 %call = tail call afn nnan ninf double @pow(double 1.010000e+01, double %e)
256 ; CHECK-NEXT: [[MUL:%.*]] = fmul nnan ninf afn double [[E:%.*]], 0x400A934F{{.*}}
[all …]
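
Note: the pow-exp.ll checks above cover the nnan ninf afn rewrite of pow with a constant base, pow(C, e) ==> exp2(log2(C) * e); the hex constants in the fmul lines appear to be the precomputed log2(C) values. A small C++ sketch of the identity:

    #include <cassert>
    #include <cmath>

    int main() {
        double C = 0.7;                                 // any positive constant base
        for (double e : {-2.0, 0.5, 3.0}) {
            double lhs = std::pow(C, e);
            double rhs = std::exp2(std::log2(C) * e);   // folded form
            assert(std::fabs(lhs - rhs) <= 1e-12 * std::fabs(lhs));
        }
        return 0;
    }
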
fcmp.ll 21 ; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ogt float [[A:%.*]], 1.000000e+00
25 %cmp = fcmp ninf ogt double %ext, 1.000000e+00
191 ; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
196 %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
202 ; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
207 %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
213 ; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
218 %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
224 ; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ogt <2 x float> [[X:%.*]], [[Y:%.*]]
229 %cmp = fcmp ninf olt <2 x float> %neg1, %neg2
[all …]
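
Note: the vector cases above check that a comparison of two negated operands, fcmp ninf olt (fneg x), (fneg y), is canonicalized to the swapped comparison on the originals, fcmp ninf ogt x, y. The scalar version of that identity:

    #include <cassert>

    int main() {
        float x = 1.5f, y = -2.25f;
        assert(((-x) < (-y)) == (x > y));   // negate both sides, flip the predicate
        return 0;
    }
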
fabs-copysign.ll 14 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf double @llvm.copysign.f64(double 1.000000e+00, doubl…
18 %div = fdiv nnan ninf double %x, %f
24 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf double @llvm.copysign.f64(double 1.000000e+00, doubl…
28 %div = fdiv nnan ninf double %f, %x
34 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf <4 x double> @llvm.copysign.v4f64(<4 x double> <doub…
38 %div = fdiv nnan ninf <4 x double> %x, %f
44 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf <4 x double> @llvm.copysign.v4f64(<4 x double> <doub…
48 %div = fdiv nnan ninf <4 x double> %f, %x
54 ; CHECK-NEXT: [[TMP1:%.*]] = call nnan ninf float @llvm.copysign.f32(float 1.000000e+00, float […
58 %div = fdiv nnan ninf float %x, %f
[all …]
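
Note: the fabs-copysign.ll checks above are for the nnan ninf fold of a value divided by its own absolute value, x / fabs(x) ==> copysign(1.0, x) (only the sign of x survives; the flags excuse x being 0, inf or NaN). For ordinary values:

    #include <cassert>
    #include <cmath>

    int main() {
        for (double x : {3.5, -0.25, -1e300}) {
            assert(x / std::fabs(x) == std::copysign(1.0, x));
        }
        return 0;
    }
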
pow-sqrt.ll 87 ; CHECK-NEXT: [[SQRT:%.*]] = call ninf double @sqrt(double [[X:%.*]])
88 ; CHECK-NEXT: [[ABS:%.*]] = call ninf double @llvm.fabs.f64(double [[SQRT]])
91 %pow = call ninf double @pow(double %x, double 5.0e-01)
97 ; CHECK-NEXT: [[SQRT:%.*]] = call ninf <2 x double> @llvm.sqrt.v2f64(<2 x double> [[X:%.*]])
98 ; CHECK-NEXT: [[ABS:%.*]] = call ninf <2 x double> @llvm.fabs.v2f64(<2 x double> [[SQRT]])
101 …%pow = call ninf <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 5.0e-01, doubl…
131 ; CHECK-NEXT: [[SQRTF:%.*]] = call ninf nsz float @sqrtf(float [[X:%.*]])
134 %pow = call ninf nsz float @powf(float %x, float 5.0e-01)
140 ; CHECK-NEXT: [[SQRT:%.*]] = call ninf nsz double @llvm.sqrt.f64(double [[X:%.*]])
143 %pow = call ninf nsz double @llvm.pow.f64(double %x, double 5.0e-01)
[all …]
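
Note: the pow-sqrt.ll matches above show pow(x, 0.5) being expanded to sqrt. With only ninf the sqrt is wrapped in fabs, because pow(-0.0, 0.5) is +0.0 while sqrt(-0.0) is -0.0; once nsz is also present (the later matches) the fabs disappears. ninf itself excuses x == -inf, where pow returns +inf but sqrt returns NaN. Illustration of the signed-zero corner:

    #include <cassert>
    #include <cmath>

    int main() {
        assert(!std::signbit(std::pow(-0.0, 0.5)));         // pow(-0.0, 0.5) is +0.0
        assert(std::signbit(std::sqrt(-0.0)));              // sqrt(-0.0) is -0.0
        assert(!std::signbit(std::fabs(std::sqrt(-0.0))));  // fabs restores +0.0
        return 0;
    }
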
copysign.ll 20 ; CHECK-NEXT: [[TMP1:%.*]] = call ninf <3 x double> @llvm.fabs.v3f64(<3 x double> [[X:%.*]])
23 …%r = call ninf <3 x double> @llvm.copysign.v3f64(<3 x double> %x, <3 x double> <double 42.0, doubl…
49 ; CHECK-NEXT: [[TMP1:%.*]] = call ninf float @llvm.fabs.f32(float [[X:%.*]])
53 %r = call ninf float @llvm.copysign.f32(float %x, float %fabs)
72 ; CHECK-NEXT: [[R:%.*]] = call ninf float @llvm.copysign.f32(float [[Y:%.*]], float [[MAX]])
76 %r = call ninf float @llvm.copysign.f32(float %y, float %max)
85 ; CHECK-NEXT: [[R:%.*]] = call ninf float @llvm.copysign.f32(float [[X:%.*]], float [[Z:%.*]])
89 %r = call ninf float @llvm.copysign.f32(float %x, float %s)
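
Note: the copysign.ll checks above fold copysign when the sign operand is known non-negative, either a positive constant splat or itself a fabs, so the call reduces to fabs of the first operand. Both identities in plain C++:

    #include <cassert>
    #include <cmath>

    int main() {
        double x = -3.0, y = -7.0;
        assert(std::copysign(x, 42.0) == std::fabs(x));           // positive constant sign
        assert(std::copysign(x, std::fabs(y)) == std::fabs(x));   // sign operand already non-negative
        return 0;
    }
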
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/
legalize-amdgcn.rsq.clamp.mir 19 …; SI: [[INT:%[0-9]+]]:_(s32) = nnan ninf nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), [[COPY…
24 …; VI: [[INT:%[0-9]+]]:_(s32) = nnan ninf nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), [[COPY]](s32)
26 ; VI: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = nnan ninf nsz G_FMINNUM_IEEE [[INT]], [[C]]
28 ; VI: [[FMAXNUM_IEEE:%[0-9]+]]:_(s32) = nnan ninf nsz G_FMAXNUM_IEEE [[FMINNUM_IEEE]], [[C1]]
31 %1:_(s32) = nnan ninf nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), %0
49 …; SI: [[INT:%[0-9]+]]:_(s32) = nnan ninf nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), [[COPY…
54 …; VI: [[INT:%[0-9]+]]:_(s32) = nnan ninf nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), [[COPY]](s32)
56 ; VI: [[FMINNUM:%[0-9]+]]:_(s32) = nnan ninf nsz G_FMINNUM [[INT]], [[C]]
58 ; VI: [[FMAXNUM:%[0-9]+]]:_(s32) = nnan ninf nsz G_FMAXNUM [[FMINNUM]], [[C1]]
61 %1:_(s32) = nnan ninf nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), %0
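
Note: the GlobalISel test above keeps rsq.clamp as a single intrinsic on SI but legalizes it on VI as plain rsq followed by an fmin/fmax clamp; the two constants are presumably +/-FLT_MAX, so an infinite result is squashed into the finite range. A hedged C++ sketch of that assumed behaviour:

    #include <cassert>
    #include <cmath>
    #include <limits>

    static float rsq_clamp_like_vi(float x) {
        float r = 1.0f / std::sqrt(x);                           // the plain rsq
        r = std::fmin(r, std::numeric_limits<float>::max());     // G_FMINNUM_IEEE with C
        r = std::fmax(r, -std::numeric_limits<float>::max());    // G_FMAXNUM_IEEE with C1
        return r;
    }

    int main() {
        assert(rsq_clamp_like_vi(0.0f) == std::numeric_limits<float>::max());  // +inf clamped to max
        assert(std::fabs(rsq_clamp_like_vi(4.0f) - 0.5f) < 1e-6f);
        return 0;
    }
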
/external/llvm-project/libcxx/test/std/utilities/function.objects/unord.hash/
floating.pass.cpp 44 std::size_t ninf = h(-INFINITY); in test() local
50 assert(t0 != ninf); in test()
55 assert(tp1 != ninf); in test()
59 assert(t1 != ninf); in test()
62 assert(tn1 != ninf); in test()
64 assert(pinf != ninf); in test()
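
Note: the floating.pass.cpp asserts above (repeated below in the older libcxx copy) check that std::hash gives -INFINITY a value distinct from the other probed inputs. The standard only requires equal inputs to hash equally, so this is a quality-of-implementation check; a condensed version:

    #include <cassert>
    #include <cmath>
    #include <functional>

    int main() {
        std::hash<double> h;
        std::size_t ninf = h(-INFINITY);
        assert(h(INFINITY) != ninf);   // pinf != ninf
        assert(h(0.0) != ninf);        // t0 != ninf
        return 0;
    }
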
/external/libcxx/test/std/utilities/function.objects/unord.hash/
floating.pass.cpp 45 std::size_t ninf = h(-INFINITY); in test() local
51 assert(t0 != ninf); in test()
56 assert(tp1 != ninf); in test()
60 assert(t1 != ninf); in test()
63 assert(tn1 != ninf); in test()
65 assert(pinf != ninf); in test()
/external/llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/
it-block-mov.mir 73 ; CHECK: renamable $q1 = nnan ninf nsz MVE_VLDRWU32 renamable $r3, 0, 0, $noreg
74 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VADDf32 killed renamable $q0, killed renamable $q1, 0…
80 …; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $nor…
83 …; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14 /* CC::al *…
84 …; CHECK: renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14 /* C…
88 …; CHECK: renamable $s4 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14 /* C…
96 ; CHECK: renamable $q2 = nnan ninf nsz MVE_VLDRWU32 renamable $r0, 0, 1, renamable $vpr
97 …; CHECK: renamable $q2 = nnan ninf nsz MVE_VSUB_qr_f32 killed renamable $q2, killed renamable $r…
98 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VFMAf32 killed renamable $q0, killed renamable $q2, k…
104 …; CHECK: renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $nor…
[all …]
/external/llvm-project/llvm/test/CodeGen/Thumb2/
mve-vpt-2-blocks.mir 11 …%0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
12 …%1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
13 …%2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
14 …%3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
15 …%4 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
74 …; CHECK: renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, ren…
75 …; CHECK: renamable $q2 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, internal ren…
76 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, renamable $q…
77 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q0, renamable $q…
81 …; CHECK: renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q0, killed renamable $q3,…
[all …]
mve-vpt-block-4-ins.mir 11 …%0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
12 …%1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
13 …%2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
14 …%3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
73 …; CHECK: renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, ren…
74 …; CHECK: renamable $q2 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, internal ren…
75 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, renamable $q…
76 …; CHECK: renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q0, killed renam…
81 …renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr…
82 …renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q2, 1, renamable $vpr…
[all …]
mve-vpt-3-blocks-kill-vpr.mir 11 …%0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
12 …%1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
13 …%2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
14 …%3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
73 …; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, ren…
74 …; CHECK: renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, internal ren…
79 …; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, ren…
80 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed renam…
85 …renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr…
87 …renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, renamable $q3, 1, renamable $vpr…
[all …]
mve-vpt-2-blocks-ctrl-flow.mir 11 …%0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
12 …%1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
15 …%2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
16 …%3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
74 …; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, r…
75 …; CHECK: renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal renamable $q3, internal renamabl…
81 …; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, r…
82 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed ren…
90 …renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr…
91 …renamable $q1 = nnan ninf nsz MVE_VMINNMf32 renamable $q3, renamable $q3, 1, renamable $vpr, undef…
[all …]
mve-vpt-2-blocks-non-consecutive-ins.mir 11 …%0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
12 …%1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
13 …%2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
14 …%3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x f…
74 …; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, ren…
75 …; CHECK: renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, internal ren…
80 …; CHECK: renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, ren…
81 …; CHECK: renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed renam…
86 …renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr…
87 …renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, renamable $q3, 1, renamable $vpr…
[all …]
/external/llvm/test/CodeGen/AMDGPU/
fp-classify.ll 51 %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
52 %and = and i1 %ord, %ninf
65 %ninf = fcmp une float %x.fabs, 0xFFF0000000000000
66 %and = and i1 %ord, %ninf
78 %ninf = fcmp une float %x, 0x7FF0000000000000
79 %and = and i1 %ord, %ninf
92 %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
93 %and = and i1 %ord, %ninf
106 %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
107 %and = and i1 %ord, %ninf
[all …]
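
Note: in the fp-classify.ll tests above (and the near-identical newer copy below), %ninf is an "is not infinity" test built by hand: an ordered compare of x with itself (not NaN) and an une compare of fabs(x) against 0x7FF0000000000000, LLVM's printing of +infinity; the and of the two is an is-finite check the backend is expected to match to a single classify instruction. The same predicate in C++:

    #include <cassert>
    #include <cmath>
    #include <limits>

    static bool finite_like_ir(float x) {
        bool ord  = (x == x);                                               // %ord: false only for NaN
        bool ninf = std::fabs(x) != std::numeric_limits<float>::infinity(); // %ninf
        return ord && ninf;                                                 // %and
    }

    int main() {
        float inf = std::numeric_limits<float>::infinity();
        assert(finite_like_ir(1.0f) && !finite_like_ir(inf) && !finite_like_ir(-inf));
        assert(!finite_like_ir(std::nanf("")));
        assert(finite_like_ir(1.0f) == (bool)std::isfinite(1.0f));          // agrees with std::isfinite
        return 0;
    }
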
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
fp-classify.ll 51 %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
52 %and = and i1 %ord, %ninf
79 %ninf = fcmp une float %x.fabs, 0xFFF0000000000000
80 %and = and i1 %ord, %ninf
92 %ninf = fcmp une float %x, 0x7FF0000000000000
93 %and = and i1 %ord, %ninf
106 %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
107 %and = and i1 %ord, %ninf
120 %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
121 %and = and i1 %ord, %ninf
[all …]
/external/llvm/test/Transforms/InstSimplify/
fast-math.ll 49 ; fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
50 ; where nnan and ninf have to occur at least once somewhere in this
59 %t1 = fsub nnan ninf float 0.0, %a
60 %zero1 = fadd nnan ninf float %t1, %a
63 %zero2 = fadd ninf float %t2, %a
65 %t3 = fsub nnan ninf float 0.0, %a
69 %zero4 = fadd nnan ninf float %t4, %a
89 ; CHECK: [[NO_ZERO1:%.*]] = fsub ninf float %a, %a
98 %no_zero1 = fsub ninf float %a, %a
121 ; CHECK: [[NO_ZERO1:%.*]] = fadd ninf float %a, 0.000000e+00
[all …]
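
Note: the fast-math.ll comments above describe the fold fadd nnan ninf X, (fsub nnan ninf 0, X) ==> 0, with the flags required at least once across the pair. The flags matter because the identity fails for infinities and NaNs:

    #include <cassert>
    #include <cmath>
    #include <limits>

    int main() {
        double inf = std::numeric_limits<double>::infinity();
        assert(std::isnan(inf + (0.0 - inf)));   // inf + (0 - inf) is NaN, not 0
        double x = 1.25;
        assert(x + (0.0 - x) == 0.0);            // fine for finite x
        return 0;
    }
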
/external/llvm/test/Assembler/
fast-math-flags.ll 87 ; CHECK: %a = fadd nnan ninf float %x, %y
88 %a = fadd ninf nnan float %x, %y
93 ; CHECK: %b_vec = fsub nnan ninf <3 x float> %vec, %vec
94 %b_vec = fsub ninf nnan <3 x float> %vec, %vec
99 ; CHECK: %d = fdiv nnan ninf float %x, %y
100 %d = fdiv ninf nnan float %x, %y
105 ; CHECK: %e_vec = frem nnan ninf <3 x float> %vec, %vec
106 %e_vec = frem ninf nnan <3 x float> %vec, %vec
121 ; CHECK: %a = fadd nnan ninf float %x, %y
122 %a = fadd ninf nnan float %x, %y
[all …]
/external/llvm-project/llvm/test/Assembler/
fast-math-flags.ll 131 ; CHECK: %a = fadd nnan ninf float %x, %y
132 %a = fadd ninf nnan float %x, %y
137 ; CHECK: %b_vec = fsub nnan ninf <3 x float> %vec, %vec
138 %b_vec = fsub ninf nnan <3 x float> %vec, %vec
143 ; CHECK: %d = fdiv nnan ninf float %x, %y
144 %d = fdiv ninf nnan float %x, %y
149 ; CHECK: %e_vec = frem nnan ninf <3 x float> %vec, %vec
150 %e_vec = frem ninf nnan <3 x float> %vec, %vec
165 ; CHECK: %a = fadd nnan ninf afn float %x, %y
166 %a = fadd ninf nnan afn float %x, %y
[all …]
