/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | unfold-masked-merge-with-const-mask-scalar.ll |
      16  %n0 = xor i4 %x, %y
      17  %n1 = and i4 %n0, 1
      29  %n0 = xor i4 %x, %y
      30  %n1 = and i4 %n0, -2
      44  %n0 = xor i4 %x, -1 ; %x
      45  %n1 = and i4 %n0, 1
      55  %n0 = xor i4 %x, 14 ; %x
      56  %n1 = and i4 %n0, 1
      68  %n0 = xor i4 %y, -1 ; %x
      69  %n1 = and i4 %n0, 1
      [all …]
|
D | invert-variable-mask-in-masked-merge-scalar.ll |
      17  %n0 = xor i4 %x, %y
      18  %n1 = and i4 %n0, %im
      33  %n0 = xor i4 %x, -1 ; %x
      34  %n1 = and i4 %n0, %notmask
      47  %n0 = xor i4 %x, 6 ; %x
      48  %n1 = and i4 %n0, %notmask
      61  %n0 = xor i4 -1, %y ; %x
      62  %n1 = and i4 %n0, %notmask
      75  %n0 = xor i4 %y, 6 ; %x
      76  %n1 = and i4 %n0, %notmask
      [all …]
|
D | unfold-masked-merge-with-const-mask-vector.ll |
      16  %n0 = xor <2 x i4> %x, %y
      17  %n1 = and <2 x i4> %n0, <i4 -2, i4 -2>
      29  %n0 = xor <3 x i4> %x, %y
      30  %n1 = and <3 x i4> %n0, <i4 -2, i4 undef, i4 -2>
      42  %n0 = xor <2 x i4> %x, %y
      43  %n1 = and <2 x i4> %n0, <i4 -2, i4 1>
      57  %n0 = xor <2 x i4> %x, <i4 -1, i4 -1> ; %x
      58  %n1 = and <2 x i4> %n0, <i4 1, i4 1>
      68  %n0 = xor <2 x i4> %x, <i4 14, i4 14> ; %x
      69  %n1 = and <2 x i4> %n0, <i4 1, i4 1>
      [all …]
|
D | invert-variable-mask-in-masked-merge-vector.ll |
      17  %n0 = xor <2 x i4> %x, %y
      18  %n1 = and <2 x i4> %n0, %im
      31  %n0 = xor <3 x i4> %x, %y
      32  %n1 = and <3 x i4> %n0, %im
      47  %n0 = xor <2 x i4> %x, <i4 -1, i4 -1> ; %x
      48  %n1 = and <2 x i4> %n0, %notmask
      61  %n0 = xor <2 x i4> %x, <i4 6, i4 6> ; %x
      62  %n1 = and <2 x i4> %n0, %notmask
      75  %n0 = xor <2 x i4> %x, <i4 6, i4 7> ; %x
      76  %n1 = and <2 x i4> %n0, %notmask
      [all …]
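All four InstCombine tests above exercise the same "masked merge" pattern: %n0 = xor %x, %y followed by %n1 = and %n0, mask and a final xor with %y computes (x & mask) | (y & ~mask), and the pass rewrites between the two spellings (the invert-variable-mask files additionally fold an explicitly inverted mask). A minimal C++ sketch of the underlying bit identity, written from scratch rather than taken from the tests:

    #include <cassert>
    #include <cstdint>

    // xor-form of a masked merge, spelled the way the IR tests spell it:
    // n0 = x ^ y, n1 = n0 & m, result = n1 ^ y.
    static std::uint8_t merge_xor_form(std::uint8_t x, std::uint8_t y, std::uint8_t m) {
      std::uint8_t n0 = x ^ y;
      std::uint8_t n1 = n0 & m;
      return n1 ^ y;                    // bits of x where m is set, bits of y elsewhere
    }

    // Unfolded and/or form of the same merge: (x & m) | (y & ~m).
    static std::uint8_t merge_andor_form(std::uint8_t x, std::uint8_t y, std::uint8_t m) {
      return static_cast<std::uint8_t>((x & m) | (y & static_cast<std::uint8_t>(~m)));
    }

    int main() {
      // Exhaustive check over 8-bit values for a handful of masks.
      for (int x = 0; x < 256; ++x)
        for (int y = 0; y < 256; ++y)
          for (int m : {0x00, 0x0F, 0x55, 0xAA, 0xFF})
            assert(merge_xor_form(x, y, m) == merge_andor_form(x, y, m));
    }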
|
/external/llvm-project/mlir/test/Dialect/Linalg/ |
D | tile-indexed-generic.mlir |
      2   // RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=25,0" | FileCheck %s -check-prefix=TILE-25n0
      34  // TILE-25n0-LABEL: func @indexed_generic_vector
      35  // TILE-25n0: %[[C25:.*]] = constant 25 : index
      36  // TILE-25n0: scf.for %[[J:.*]] = {{.*}} step %[[C25]]
      37  // TILE-25n0: linalg.indexed_generic
      38  // TILE-25n0: ^bb0(%[[I:.*]]: index, %[[IN:.*]]: f32, %[[OUT:.*]]: f32)
      39  // TILE-25n0: %[[NEW_I:.*]] = addi %[[I]], %[[J]] : index
      40  // TILE-25n0: %[[NEW_I_INT:.*]] = index_cast %[[NEW_I]] : index to i32
      41  // TILE-25n0: %[[NEW_I_FLOAT:.*]] = sitofp %[[NEW_I_INT]] : i32 to f32
      42  // TILE-25n0: %[[OUT:.*]] = addf %[[IN]], %[[NEW_I_FLOAT]] : f32
      [all …]
|
/external/eigen/test/ |
D | sparse_block.cpp |
      192  Index n0 = internal::random<Index>(1,outer-(std::max)(j0,j1));  in sparse_block() local
      194  VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(j0,0,n0,cols));  in sparse_block()
      196  VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0));  in sparse_block()
      198  VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),  in sparse_block()
      199  refMat2.middleRows(j0,n0)+refMat2.middleRows(j1,n0));  in sparse_block()
      201  VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),  in sparse_block()
      202  refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));  in sparse_block()
      206  VERIFY(m2.innerVectors(j0,n0).nonZeros() == m2.transpose().innerVectors(j0,n0).nonZeros());  in sparse_block()
      208  m2.innerVectors(j0,n0) = m2.innerVectors(j0,n0) + m2.innerVectors(j1,n0);  in sparse_block()
      210  refMat2.middleRows(j0,n0) = (refMat2.middleRows(j0,n0) + refMat2.middleRows(j1,n0)).eval();  in sparse_block()
      [all …]
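The sparse_block.cpp checks compare SparseMatrix::innerVectors(j0, n0) — n0 consecutive inner vectors, i.e. columns of a column-major matrix or rows of a row-major one — against the equivalent dense block. A small stand-alone sketch along the same lines (assumes Eigen 3.x; the names are mine, not the test's):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <iostream>

    int main() {
      // Column-major sparse matrix: an "inner vector" is a column, so
      // innerVectors(j0, n0) covers the same entries as a dense block of n0 columns.
      Eigen::MatrixXd dense = Eigen::MatrixXd::Random(6, 8);
      Eigen::SparseMatrix<double> sparse = dense.sparseView();

      const int j0 = 2, n0 = 3;  // arbitrary start column and width
      Eigen::SparseMatrix<double> slab = sparse.innerVectors(j0, n0);
      bool ok = Eigen::MatrixXd(slab).isApprox(dense.block(0, j0, dense.rows(), n0));
      std::cout << (ok ? "match" : "mismatch") << "\n";
    }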
|
/external/boringssl/src/crypto/fipsmodule/bn/asm/ |
D | armv8-mont.pl |
      63   $n0="x4"; # const BN_ULONG *n0,
      91   ldr $n0,[$n0] // *n0
      101  mul $m1,$lo0,$n0 // "tp[0]"*n0
      172  mul $m1,$lo0,$n0
      312  ldr $n0,[$n0] // *n0
      342  str $n0,[x29,#112] // offload n0
      510  mov $n0,$a0
      555  mul $t0,$a0,$n0
      557  mul $t1,$a1,$n0
      559  mul $t2,$a2,$n0
      [all …]
|
D | armv4-mont.pl | 82 $n0="r8"; 163 ldr $n0,[$_n0] @ &n0 167 ldr $n0,[$n0] @ *n0 171 str $n0,[$_n0] @ save n0 value 172 mul $n0,$alo,$n0 @ "tp[0]"*n0 174 umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]" 184 umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0 194 ldr $n0,[$_n0] @ restore n0 213 mul $n0,$alo,$n0 215 umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]" [all …]
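In both Montgomery-multiplication scripts, $n0 holds the per-word constant n0 = -N^{-1} mod 2^w (w being the word size); the mul $m1,$lo0,$n0 / "tp[0]"*n0 lines compute the multiplier that makes the running total's low word vanish once a multiple of the modulus is added. A hedged C++ sketch of that word-by-word reduction, generic rather than a transcription of the assembly (assumes a compiler with unsigned __int128):

    #include <cstdint>
    #include <vector>

    using u64  = std::uint64_t;
    using u128 = unsigned __int128;   // assumption: GCC/Clang-style 128-bit type

    // Word-by-word Montgomery reduction of t (at least 2*num words) modulo N,
    // the step the "tp[0]"*n0 comments refer to.  n0 == -N[0]^{-1} mod 2^64.
    void mont_reduce(std::vector<u64>& t, const std::vector<u64>& N, u64 n0) {
      const std::size_t num = N.size();
      for (std::size_t i = 0; i < num; ++i) {
        const u64 m = t[i] * n0;                 // chosen so that t[i] + m*N[0] == 0 (mod 2^64)
        u128 carry = 0;
        for (std::size_t j = 0; j < num; ++j) {
          const u128 acc = (u128)t[i + j] + (u128)m * N[j] + carry;
          t[i + j] = (u64)acc;
          carry = acc >> 64;
        }
        for (std::size_t k = i + num; carry != 0 && k < t.size(); ++k) {
          const u128 acc = (u128)t[k] + carry;   // propagate the carry upward
          t[k] = (u64)acc;
          carry = acc >> 64;
        }
      }
      // t[0..num-1] are now zero; t[num..] holds the reduced value, still possibly
      // needing one final conditional subtraction of N (omitted here).
    }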
|
/external/rust/crates/ring/crypto/fipsmodule/bn/asm/ |
D | armv8-mont.pl |
      63   $n0="x4"; # const BN_ULONG *n0,
      91   ldr $n0,[$n0] // *n0
      101  mul $m1,$lo0,$n0 // "tp[0]"*n0
      172  mul $m1,$lo0,$n0
      312  ldr $n0,[$n0] // *n0
      342  str $n0,[x29,#112] // offload n0
      510  mov $n0,$a0
      555  mul $t0,$a0,$n0
      557  mul $t1,$a1,$n0
      559  mul $t2,$a2,$n0
      [all …]
|
D | armv4-mont.pl | 82 $n0="r8"; 165 ldr $n0,[$_n0] @ &n0 169 ldr $n0,[$n0] @ *n0 173 str $n0,[$_n0] @ save n0 value 174 mul $n0,$alo,$n0 @ "tp[0]"*n0 176 umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]" 186 umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0 196 ldr $n0,[$_n0] @ restore n0 215 mul $n0,$alo,$n0 217 umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]" [all …]
|
/external/eigen/unsupported/Eigen/src/FFT/ |
D | ei_fftw_impl.h |
      82   void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
      83   …if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERV…
      87   void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
      88   …if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESER…
      124  void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
      125  …if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE…
      129  void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
      130  …if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERV…
      165  void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
      166  …if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERV…
      [all …]
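Eigen's FFTW backend caches one plan per transform: fwd2/inv2 lazily build a 2-D complex DFT plan for an n0-by-n1 array on first use, and the three copies of the code cover the float (fftwf_), double (fftw_) and long-double (fftwl_) front ends. A minimal double-precision example of the same FFTW call outside Eigen (assumes FFTW3 is installed and linked with -lfftw3):

    #include <fftw3.h>

    int main() {
      const int n0 = 8, n1 = 16;   // transform size: n0 rows, n1 columns (row-major)

      fftw_complex* in  = fftw_alloc_complex(n0 * n1);
      fftw_complex* out = fftw_alloc_complex(n0 * n1);

      // Same call shape as Eigen's fwd2(): plan once, execute as often as needed.
      fftw_plan plan = fftw_plan_dft_2d(n0, n1, in, out, FFTW_FORWARD, FFTW_ESTIMATE);

      for (int i = 0; i < n0 * n1; ++i) {  // fill the input with something deterministic
        in[i][0] = i;
        in[i][1] = 0.0;
      }
      fftw_execute(plan);

      fftw_destroy_plan(plan);
      fftw_free(in);
      fftw_free(out);
    }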
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | unfold-masked-merge-scalar-variablemask.ll |
      72   %n0 = xor i8 %x, %y
      73   %n1 = and i8 %n0, %mask
      85   %n0 = xor i16 %x, %y
      86   %n1 = and i16 %n0, %mask
      98   %n0 = xor i32 %x, %y
      99   %n1 = and i32 %n0, %mask
      111  %n0 = xor i64 %x, %y
      112  %n1 = and i64 %n0, %mask
      126  %n0 = xor i32 %x, %y
      127  %n1 = and i32 %mask, %n0 ; swapped
      [all …]
|
D | unfold-masked-merge-scalar-constmask-lowhigh.ll |
      68   %n0 = xor i8 %x, %y
      69   %n1 = and i8 %n0, 15
      81   %n0 = xor i16 %x, %y
      82   %n1 = and i16 %n0, 255
      94   %n0 = xor i32 %x, %y
      95   %n1 = and i32 %n0, 65535
      106  %n0 = xor i64 %x, %y
      107  %n1 = and i64 %n0, 4294967295
      123  %n0 = xor i32 %x, %y
      124  %n1 = and i32 %n0, 65535
      [all …]
|
D | unfold-masked-merge-scalar-constmask-innerouter.ll |
      72   %n0 = xor i8 %x, %y
      73   %n1 = and i8 %n0, 60
      85   %n0 = xor i16 %x, %y
      86   %n1 = and i16 %n0, 4080
      98   %n0 = xor i32 %x, %y
      99   %n1 = and i32 %n0, 16776960
      111  %n0 = xor i64 %x, %y
      112  %n1 = and i64 %n0, 281474976645120
      128  %n0 = xor i32 %x, %y
      129  %n1 = and i32 %n0, 16776960
      [all …]
|
D | unfold-masked-merge-scalar-constmask-interleavedbits.ll |
      77   %n0 = xor i8 %x, %y
      78   %n1 = and i8 %n0, 85
      91   %n0 = xor i16 %x, %y
      92   %n1 = and i16 %n0, 21845
      104  %n0 = xor i32 %x, %y
      105  %n1 = and i32 %n0, 1431655765
      117  %n0 = xor i64 %x, %y
      118  %n1 = and i64 %n0, 6148914691236517205
      134  %n0 = xor i32 %x, %y
      135  %n1 = and i32 %n0, 1431655765
      [all …]
|
D | unfold-masked-merge-scalar-constmask-interleavedbytehalves.ll |
      73   %n0 = xor i8 %x, %y
      74   %n1 = and i8 %n0, 15
      87   %n0 = xor i16 %x, %y
      88   %n1 = and i16 %n0, 3855
      100  %n0 = xor i32 %x, %y
      101  %n1 = and i32 %n0, 252645135
      113  %n0 = xor i64 %x, %y
      114  %n1 = and i64 %n0, 1085102592571150095
      130  %n0 = xor i32 %x, %y
      131  %n1 = and i32 %n0, 252645135
      [all …]
|
/external/apache-commons-math/src/main/java/org/apache/commons/math/stat/descriptive/moment/ |
D | Kurtosis.java |
      185  double n0 = length;  in evaluate() local
      188  (n0 * (n0 + 1)) / ((n0 - 1) * (n0 - 2) * (n0 - 3));  in evaluate()
      190  (3 * FastMath.pow(n0 - 1, 2.0)) / ((n0 - 2) * (n0 - 3));  in evaluate()
|
D | Skewness.java |
      111  double n0 = moment.getN();  in getResult() local
      112  return (n0 * moment.m3) /  in getResult()
      113  ((n0 - 1) * (n0 -2) * FastMath.sqrt(variance) * variance);  in getResult()
      182  double n0 = length;  in evaluate() local
      185  skew = (n0 / ((n0 - 1) * (n0 - 2))) * accum3;  in evaluate()
|
D | FourthMoment.java |
      97   double n0 = n;  in increment() local
      100  ((n0 * n0) - 3 * (n0 -1)) * (nDevSq * nDevSq * (n0 - 1) * n0);  in increment()
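In all three Commons Math files, n0 is simply the sample size n promoted to double. Kurtosis.evaluate() applies the bias-corrected excess-kurtosis (G2) formula whose coefficients appear in the matches above, and Skewness the matching G1 correction. A stand-alone C++ sketch of those two formulas (my own variable names; assumes n >= 4 and a nonzero variance):

    #include <cmath>
    #include <vector>

    struct Moments { double skewness; double kurtosis; };

    // Bias-corrected sample skewness (G1) and excess kurtosis (G2), using the same
    // n0-based coefficients that appear in Skewness.java and Kurtosis.java.
    Moments sampleMoments(const std::vector<double>& x) {
      const double n0 = static_cast<double>(x.size());
      double mean = 0.0;
      for (double v : x) mean += v;
      mean /= n0;

      double sumSq = 0.0, sumCube = 0.0, sumQuad = 0.0;
      for (double v : x) {
        const double d = v - mean;
        sumSq   += d * d;
        sumCube += d * d * d;
        sumQuad += d * d * d * d;
      }
      const double variance = sumSq / (n0 - 1);        // bias-corrected variance
      const double s = std::sqrt(variance);

      const double skew  = (n0 / ((n0 - 1) * (n0 - 2))) * (sumCube / (s * s * s));
      const double coeff = (n0 * (n0 + 1)) / ((n0 - 1) * (n0 - 2) * (n0 - 3));
      const double term  = (3 * std::pow(n0 - 1, 2.0)) / ((n0 - 2) * (n0 - 3));
      const double kurt  = coeff * (sumQuad / (variance * variance)) - term;
      return {skew, kurt};
    }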
|
/external/boringssl/src/crypto/fipsmodule/bn/ |
D | montgomery.c |
      158  to->n0[0] = from->n0[0];  in BN_MONT_CTX_copy()
      159  to->n0[1] = from->n0[1];  in BN_MONT_CTX_copy()
      198  uint64_t n0 = bn_mont_n0(&mont->N);  in bn_mont_ctx_set_N_and_n0() local
      199  mont->n0[0] = (BN_ULONG)n0;  in bn_mont_ctx_set_N_and_n0()
      201  mont->n0[1] = (BN_ULONG)(n0 >> BN_BITS2);  in bn_mont_ctx_set_N_and_n0()
      203  mont->n0[1] = 0;  in bn_mont_ctx_set_N_and_n0()
      300  BN_ULONG n0 = mont->n0[0];  in bn_from_montgomery_in_place() local
      303  BN_ULONG v = bn_mul_add_words(a + i, n, num_n, a[i] * n0);  in bn_from_montgomery_in_place()
      433  if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {  in BN_mod_mul_montgomery()
      482  if (!bn_mul_mont(r, a, b, mont->N.d, mont->n0, num)) {  in bn_mod_mul_montgomery_small()
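Here mont->n0 caches the same constant the assembly above loads: n0 = -N^{-1} mod 2^64, produced by bn_mont_n0() (on builds where BN_ULONG is 32-bit, the high half lands in n0[1], otherwise n0[1] is 0), and bn_from_montgomery_in_place() multiplies each word by it to pick the per-word reduction multiplier. A hedged sketch of how such a constant can be computed by Hensel lifting — it mirrors the role of bn_mont_n0 but is not its implementation:

    #include <cstdint>

    // Compute n0 = -N^{-1} mod 2^64 for an odd low modulus word N.
    // For odd N, N*N == 1 (mod 8), so inv = N is already correct to 3 bits;
    // each Newton/Hensel step inv = inv*(2 - N*inv) doubles the number of correct bits.
    std::uint64_t mont_n0(std::uint64_t N) {
      std::uint64_t inv = N;
      for (int i = 0; i < 5; ++i)       // 3 -> 6 -> 12 -> 24 -> 48 -> 96 bits, enough for 64
        inv *= 2 - N * inv;
      return ~inv + 1;                  // two's-complement negation: -inv mod 2^64
    }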
|
/external/rust/crates/quiche/deps/boringssl/src/crypto/fipsmodule/bn/ |
D | montgomery.c |
      158  to->n0[0] = from->n0[0];  in BN_MONT_CTX_copy()
      159  to->n0[1] = from->n0[1];  in BN_MONT_CTX_copy()
      198  uint64_t n0 = bn_mont_n0(&mont->N);  in bn_mont_ctx_set_N_and_n0() local
      199  mont->n0[0] = (BN_ULONG)n0;  in bn_mont_ctx_set_N_and_n0()
      201  mont->n0[1] = (BN_ULONG)(n0 >> BN_BITS2);  in bn_mont_ctx_set_N_and_n0()
      203  mont->n0[1] = 0;  in bn_mont_ctx_set_N_and_n0()
      300  BN_ULONG n0 = mont->n0[0];  in bn_from_montgomery_in_place() local
      303  BN_ULONG v = bn_mul_add_words(a + i, n, num_n, a[i] * n0);  in bn_from_montgomery_in_place()
      433  if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) {  in BN_mod_mul_montgomery()
      482  if (!bn_mul_mont(r, a, b, mont->N.d, mont->n0, num)) {  in bn_mod_mul_montgomery_small()
|
/external/fft2d/src/fft2d/fft2d/ |
D | fftsg2d.c |
      843   int n0;  member
      870   ag[i].n0 = i;  in xdft2d0_subth()
      904   ag[i].n0 = i;  in cdft2d_subth()
      934   ag[i].n0 = i;  in ddxt2d0_subth()
      968   ag[i].n0 = i;  in ddxt2d_subth()
      989   int nthread, n0, n1, n2, icr, isgn, *ip, i;  in xdft2d0_th() local
      993   n0 = ((fft2d_arg_t *) p)->n0;  in xdft2d0_th()
      1002  for (i = n0; i < n1; i += nthread) {  in xdft2d0_th()
      1006  for (i = n0; i < n1; i += nthread) {  in xdft2d0_th()
      1017  int nthread, n0, n1, n2, isgn, *ip, i, j;  in cdft2d_th() local
      [all …]
|
/external/rust/crates/ring/src/arithmetic/ |
D | bigint.rs |
      219  n0: N0,  field
      266  let n0 = {  in from_boxed_limbs() localVariable
      287  n0: n0.clone(),  in from_boxed_limbs()
      297  n0,  in from_boxed_limbs()
      346  n0: self.n0.clone(),  in as_partial()
      354  n0: N0,  field
      414  limbs_mont_mul(&mut limbs, &one, &m.limbs, &m.n0);  in decode_once()
      476  limbs_mont_mul(&mut b.limbs, &a.limbs, &m.limbs, &m.n0);  in elem_mul_()
      523  limbs_from_mont_in_place(&mut r.limbs, tmp, &m.limbs, &m.n0);  in elem_reduced()
      534  limbs_mont_square(&mut a.limbs, &m.limbs, &m.n0);  in elem_squared()
      [all …]
|
/external/llvm-project/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/ |
D | mutex.fail.cpp |
      28  M n0, n1, n2;  in main() local
      38  test_conversion<LG>({n0}); // expected-error{{no matching function for call}}  in main()
      44  test_conversion<LG>({n0, n1}); // expected-error{{no matching function for call}}  in main()
      50  test_conversion<LG>({n0, n1, n2}); // expected-error{{no matching function for call}}  in main()
|
/external/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.scoped/ |
D | mutex.fail.cpp |
      29  M n0, n1, n2;  in main() local
      39  test_conversion<LG>({n0}); // expected-error{{no matching function for call}}  in main()
      45  test_conversion<LG>({n0, n1}); // expected-error{{no matching function for call}}  in main()
      51  test_conversion<LG>({n0, n1, n2}); // expected-error{{no matching function for call}}  in main()
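Both copies of mutex.fail.cpp verify that a std::scoped_lock cannot be formed by copy-list-initialization from a braced list of mutexes — its constructors are explicit — so each test_conversion call above is expected to fail to compile. For contrast, the ordinary explicit usage (a plain C++17 sketch, not part of the test):

    #include <mutex>

    std::mutex n0, n1, n2;

    void correct_usage() {
      // scoped_lock must be named directly; it acquires all three mutexes with
      // deadlock avoidance and releases them when `lock` goes out of scope.
      std::scoped_lock lock(n0, n1, n2);
    }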
|