/external/musl/src/math/ |
D | exp2l.c |
      40  redux = 0x1.8p63 / TBLSIZE,  variable
     237  u.f = x + redux;  in exp2l()
     242  u.f -= redux;  in exp2l()
     271  redux = 0x1.8p112 / TBLSIZE;  variable
     603  u.f = x + redux;  in exp2l()
     608  u.f -= redux;  in exp2l()
|
/external/trusty/musl/src/math/ |
D | exp2l.c |
      40  redux = 0x1.8p63 / TBLSIZE,  variable
     237  u.f = x + redux;  in exp2l()
     242  u.f -= redux;  in exp2l()
     271  redux = 0x1.8p112 / TBLSIZE;  variable
     603  u.f = x + redux;  in exp2l()
     608  u.f -= redux;  in exp2l()
|
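The redux value in the exp2l.c hits above (the musl and trusty copies are identical) is the classic round-to-grid constant: 0x1.8p(MANT_DIG-1)/TBLSIZE, i.e. 1.5*2^63 for the 64-bit x87 significand and 1.5*2^112 for the 113-bit ld128 format. Adding it to x rounds x to the nearest 1/TBLSIZE in hardware, the low significand bits of the sum hold the table index, and subtracting it back recovers the rounded x. A minimal double-precision sketch of the same idea, assuming TBLSIZE = 256; it is not musl's actual code and the names are illustrative:

    // Round-to-nearest-1/TBLSIZE via the "redux" constant (double precision).
    // Illustrative only; musl's exp2() applies the same trick with more care.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const int TBLSIZE = 256;                  // assumed table size
    static const double REDUX = 0x1.8p52 / TBLSIZE;  // 1.5 * 2^52 / TBLSIZE

    int main() {
        double x = 3.7421875;                        // already range-reduced input
        double t = x + REDUX;                        // rounds x to a multiple of 1/TBLSIZE
        std::uint64_t bits;
        std::memcpy(&bits, &t, sizeof bits);
        std::uint32_t i0 = static_cast<std::uint32_t>(bits);  // low bits hold round(x*TBLSIZE)
        double x_rounded = t - REDUX;                // x rounded to the 1/TBLSIZE grid
        double z = x - x_rounded;                    // remainder used for the polynomial
        std::printf("round(x*%d) = %u, rounded x = %.10f, remainder = %.10g\n",
                    TBLSIZE, i0, x_rounded, z);
        return 0;
    }
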
/external/llvm/test/Transforms/LoopVectorize/ |
D | reverse_induction.ll |
      26  %redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
      30  %inc.redux = add i32 %tmp.i1, %redux5
      36  ret i32 %inc.redux
      58  %redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
      62  %inc.redux = add i32 %tmp.i1, %redux5
      68  ret i32 %inc.redux
      90  %redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
      94  %inc.redux = add i32 %tmp.i1, %redux5
     100  ret i32 %inc.redux
|
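For readers who do not speak LLVM IR: the phi/add/ret triples above are a plain integer sum reduction driven by a reverse (counting-down) induction variable, which is what this vectorizer test exercises in three variants. A rough scalar equivalent, with made-up function and parameter names:

    // Rough C++ equivalent of the reduction pattern in reverse_induction.ll.
    // redux below plays the role of %redux5 / %inc.redux in the IR.
    int reverse_induction_sum(const int* a, int n) {
        int redux = 0;                    // phi [ 0, %entry ]
        for (int i = n - 1; i >= 0; --i)  // reverse induction variable
            redux += a[i];                // %inc.redux = add i32 %tmp.i1, %redux5
        return redux;                     // ret i32 %inc.redux
    }
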
/external/rust/android-crates-io/crates/libm/src/math/ |
D | exp2f.rs |
      78  let redux = f32::from_bits(0x4b400000) / TBLSIZE as f32;  in exp2f()  localVariable
     119  let ui = f32::to_bits(x + redux);  in exp2f()
     126  uf -= redux;  in exp2f()
|
D | exp2.rs |
     327  let redux = f64::from_bits(0x4338000000000000) / TBLSIZE as f64;  in exp2()  localVariable
     373  let ui = f64::to_bits(x + redux);  in exp2()
     379  let uf = f64::from_bits(ui) - redux;  in exp2()
|
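A note on the Rust ports above: f32::from_bits(0x4b400000) is 0x1.8p23 (1.5 * 2^23) and f64::from_bits(0x4338000000000000) is 0x1.8p52, so these are the single- and double-precision counterparts of the redux constants shown for the long double C sources at the top.
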
/external/eigen/unsupported/test/ |
D | cxx11_tensor_reduction_sycl.cpp |
     453  Tensor<DataType, 2, DataLayout, IndexType> redux(reduced_tensorRange);  in test_first_dim_reductions_max_sycl()  local
     458  redux = in.maximum(red_axis);  in test_first_dim_reductions_max_sycl()
     480  VERIFY_IS_APPROX(redux_gpu(j, k), redux(j, k));  in test_first_dim_reductions_max_sycl()
     500  reduced_tensor redux;  in test_first_dim_reductions_max_with_offset_sycl()  local
     516  redux = in_offset.maximum(red_axis);  in test_first_dim_reductions_max_with_offset_sycl()
     518  VERIFY_IS_NOT_EQUAL(redux(i), in(i));  in test_first_dim_reductions_max_with_offset_sycl()
     536  VERIFY_IS_APPROX(redux_gpu(i), redux(i));  in test_first_dim_reductions_max_with_offset_sycl()
     558  reduced_tensor redux(full_reduced_range);  in test_last_dim_reductions_max_with_offset_sycl()  local
     562  redux.setZero();  in test_last_dim_reductions_max_with_offset_sycl()
     576  TensorMap<reduced_tensor> red_offset(redux.data() + 1, reduced_range);  in test_last_dim_reductions_max_with_offset_sycl()
     [all …]
|
D | cxx11_tensor_reduction_gpu.cu |
      67  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);  in test_first_dim_reductions()  local
      89  VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));  in test_first_dim_reductions()
     107  Tensor<Type, 2, DataLayout> redux = in.sum(red_axis);  in test_last_dim_reductions()  local
     129  VERIFY_IS_APPROX(2*redux(i), redux_gpu(i));  in test_last_dim_reductions()
|
D | cxx11_tensor_of_float16_gpu.cu |
     322  void test_gpu_reductions(int size1, int size2, int redux) {  in test_gpu_reductions()  argument
     325  << " tensor along dim " << redux << std::endl;  in test_gpu_reductions()
     330  int result_size = (redux == 1 ? size1 : size2);  in test_gpu_reductions()
     345  Eigen::array<int, 1> redux_dim = {redux};  in test_gpu_reductions()
|
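The SYCL and GPU tests above all follow the same Tensor-module pattern: choose a reduction axis, call sum() or maximum() along it, and compare the device result against the host. A minimal host-only sketch of that API, with arbitrary sizes and names:

    // Host-side sketch of the Eigen Tensor reductions these tests exercise.
    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
        Eigen::Tensor<float, 2> in(4, 3);
        in.setRandom();

        Eigen::array<int, 1> red_axis = {0};                   // reduce along dim 0
        Eigen::Tensor<float, 1> redux_sum = in.sum(red_axis);
        Eigen::Tensor<float, 1> redux_max = in.maximum(red_axis);

        std::cout << "reduced size: " << redux_sum.dimension(0)   // prints 3
                  << ", first sum: " << redux_sum(0)
                  << ", first max: " << redux_max(0) << "\n";
        return 0;
    }
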
/external/wpa_supplicant_8/src/tls/ |
D | libtommath.c |
    1880  int (*redux)(mp_int*,mp_int*,mp_int*);  in s_mp_exptmod()  local
    1932  redux = mp_reduce;  in s_mp_exptmod()
    1937  redux = mp_reduce_2k_l;  in s_mp_exptmod()
    1967  if ((err = redux (&M[1 << (winsize - 1)], P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    1979  if ((err = redux (&M[x], P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    2028  if ((err = redux (&res, P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    2045  if ((err = redux (&res, P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    2054  if ((err = redux (&res, P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    2072  if ((err = redux (&res, P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    2082  if ((err = redux (&res, P, &mu)) != MP_OKAY) {  in s_mp_exptmod()
    [all …]
|
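In s_mp_exptmod the redux function pointer lets one exponentiation loop work with whichever modular-reduction routine suits the modulus (mp_reduce or mp_reduce_2k_l, chosen once at lines 1932/1937 above and then called after every multiply). A generic sketch of that dispatch pattern using plain 64-bit arithmetic; none of these names are libtommath's API:

    // "Pluggable reduction" pattern: the main loop only ever calls redux().
    #include <cstdint>
    #include <cstdio>

    using redux_fn = std::uint64_t (*)(std::uint64_t x, std::uint64_t p);

    // Trivial stand-ins for the two real reduction strategies.
    static std::uint64_t reduce_generic(std::uint64_t x, std::uint64_t p) { return x % p; }
    static std::uint64_t reduce_special(std::uint64_t x, std::uint64_t p) { return x % p; }

    // p must stay below 2^32 so the intermediate products fit in 64 bits.
    static std::uint64_t expt_mod(std::uint64_t g, std::uint64_t e, std::uint64_t p,
                                  bool modulus_has_special_form) {
        // Chosen once, exactly like redux = mp_reduce vs. mp_reduce_2k_l above.
        redux_fn redux = modulus_has_special_form ? reduce_special : reduce_generic;

        std::uint64_t res = 1 % p;
        std::uint64_t base = redux(g, p);
        while (e) {                          // square-and-multiply; libtommath windows this
            if (e & 1) res = redux(res * base, p);
            base = redux(base * base, p);
            e >>= 1;
        }
        return res;
    }

    int main() {
        std::printf("%llu\n",
                    static_cast<unsigned long long>(expt_mod(5, 117, 1000000007ULL, false)));
        return 0;
    }
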
/external/eigen/Eigen/src/Core/ |
D | Redux.h |
     409  DenseBase<Derived>::redux(const Func& func) const
     433  return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar, NaNPropagation>());
     448  return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar, NaNPropagation>());
     463  return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>());
     478  …return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->siz…
     497  return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
|
D | VectorwiseOp.h |
     143  { return mat.redux(m_functor); }
     337  redux(const BinaryOp& func = BinaryOp()) const
|
D | StableNorm.h | 246 return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>()); in hypotNorm()
|
D | DenseBase.h | 506 Scalar redux(const BinaryOp& func) const;
|
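Redux.h is where Eigen's generic reduction lives: DenseBase::redux(func) folds every coefficient through a binary functor, and the named reductions quoted above (minCoeff, maxCoeff, sum, mean, prod at lines 433 to 497) are thin wrappers that pass the matching scalar_*_op. Calling it directly gives the same answers; a small sketch with an arbitrary matrix:

    // redux() with an explicit functor matches the named wrappers built on it.
    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::Matrix3f m;
        m << 1, 2, 3,
             4, 5, 6,
             7, 8, 9;

        float s1 = m.sum();
        float s2 = m.redux(Eigen::internal::scalar_sum_op<float, float>());
        float p  = m.redux(Eigen::internal::scalar_product_op<float>());

        std::cout << s1 << " == " << s2 << ", prod = " << p << "\n";  // 45 == 45, prod = 362880
        return 0;
    }
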
/external/libtextclassifier/native/tensorflow_models/seq_flow_lite/tflite_ops/ |
D | layer_norm.cc |
     153  for (int redux = 0; redux < axis_size; ++redux) {  in GetOffset()  local
     154  if (idx == axis[redux]) {  in GetOffset()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | bias_op.cc |
     206  redux;  in Compute()  local
     209  redux(context->eigen_device<Device>(), flat_outer.dimensions(),  in Compute()
     214  redux;  in Compute()  local
     217  redux(context->eigen_device<Device>(), flat_inner.dimensions(),  in Compute()
|
/external/eigen/test/ |
D | gpu_basic.cu |
     256  struct redux {  struct
     431  CALL_SUBTEST( run_and_compare_to_gpu(redux<Array4f>(), nthreads, in, out) );  in EIGEN_DECLARE_TEST()
     432  CALL_SUBTEST( run_and_compare_to_gpu(redux<Matrix3f>(), nthreads, in, out) );  in EIGEN_DECLARE_TEST()
|
D | redux.cpp | 154 EIGEN_DECLARE_TEST(redux) in EIGEN_DECLARE_TEST() argument
|
D | vectorwiseop.cpp | 230 …TEST_PARTIAL_REDUX_BASIC(redux(internal::scalar_sum_op<Scalar,Scalar>()),rowvec,colvec,EIGEN_EMPTY… in vectorwiseop_matrix()
|
D | array_for_matrix.cpp | 47 …VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>())); in array_for_matrix()
|
D | CMakeLists.txt | 176 ei_add_test(redux)
|
D | array_cwise.cpp | 148 …VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>())); in array()
|
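The assertions quoted from vectorwiseop.cpp, array_for_matrix.cpp and array_cwise.cpp all check the partial-reduction form of the same interface: colwise().redux(functor) must agree with the named per-column reduction. A stand-alone version of that check, with arbitrary sizes:

    // Partial reductions: colwise().redux(sum functor) equals colwise().sum().
    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::MatrixXf m = Eigen::MatrixXf::Random(5, 4);

        Eigen::RowVectorXf a = m.colwise().sum();
        Eigen::RowVectorXf b = m.colwise().redux(Eigen::internal::scalar_sum_op<float, float>());

        std::cout << "max difference: " << (a - b).cwiseAbs().maxCoeff() << "\n";  // ~0
        return 0;
    }
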
/external/eigen/Eigen/src/Geometry/ |
D | Homogeneous.h |
     108  redux(const Func& func) const
     110  return func(m_matrix.redux(func), Scalar(1));
|
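Line 110 above is the whole trick of Homogeneous::redux(): it folds the wrapped vector first and then applies the functor once more with the implicit trailing 1, so the extra coefficient is never stored. A tiny check (values arbitrary):

    // Reductions over a homogeneous expression see the implicit trailing 1.
    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        Eigen::Vector3f v(1.f, 2.f, 3.f);
        std::cout << v.homogeneous().sum() << " == " << v.sum() + 1.f << "\n";  // 7 == 7
        return 0;
    }
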
/external/eigen/doc/ |
D | TutorialReductionsVisitorsBroadcasting.dox | 81 In the meantime you can have a look at the DenseBase::redux() function.
|
/external/swiftshader/third_party/llvm-16.0/llvm/include/llvm/IR/ |
D | IntrinsicsNVVM.td |
    4532  // redux.sync.min.u32 dst, src, membermask;
    4537  // redux.sync.max.u32 dst, src, membermask;
    4542  // redux.sync.add.s32 dst, src, membermask;
    4547  // redux.sync.min.s32 dst, src, membermask;
    4552  // redux.sync.max.s32 dst, src, membermask;
    4557  // redux.sync.and.b32 dst, src, membermask;
    4562  // redux.sync.xor.b32 dst, src, membermask;
    4567  // redux.sync.or.b32 dst, src, membermask;
|
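The intrinsics documented by these TableGen comments back the redux.sync PTX instruction, a single-instruction reduction across a warp introduced with sm_80. From CUDA C++ they are normally reached through the __reduce_*_sync warp functions (CUDA 11+, compute capability 8.0+); a minimal kernel sketch, with illustrative names:

    // Warp-wide sum via redux.sync (Ampere / sm_80 and newer).
    #include <cstdio>

    __global__ void warp_sum(const int* in, int* out) {
        unsigned mask = 0xffffffffu;             // full-warp membermask
        int v = in[threadIdx.x];
        int total = __reduce_add_sync(mask, v);  // lowers to redux.sync.add.s32
        if (threadIdx.x == 0) *out = total;
    }

    int main() {
        int h_in[32], h_out = 0;
        for (int i = 0; i < 32; ++i) h_in[i] = i;   // expected sum: 496
        int *d_in, *d_out;
        cudaMalloc(&d_in, sizeof(h_in));
        cudaMalloc(&d_out, sizeof(int));
        cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
        warp_sum<<<1, 32>>>(d_in, d_out);
        cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
        std::printf("warp sum = %d\n", h_out);      // 496 on supported hardware
        cudaFree(d_in);
        cudaFree(d_out);
        return 0;
    }
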
/external/cronet/stable/third_party/libc++/src/docs/Status/ |
D | Cxx17Issues.csv | 43 "`LWG2427 <https://wg21.link/LWG2427>`__","Container adaptors as sequence containers, redux","2015-…
|