/external/eigen/unsupported/test/
D | cxx11_tensor_contract_gpu.cu
      26  void test_gpu_contraction(int m_size, int k_size, int n_size)              (argument in test_gpu_contraction)
      28    std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
      32    Tensor<float, 2, DataLayout> t_left(m_size, k_size);
      33    Tensor<float, 2, DataLayout> t_right(k_size, n_size);
      60    gpu_t_left(d_t_left, Eigen::array<int, 2>(m_size, k_size));
      62    gpu_t_right(d_t_right, Eigen::array<int, 2>(k_size, n_size));
      90  void test_scalar(int m_size, int k_size, int n_size)                       (argument in test_scalar)
      92    std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
      96    Tensor<float, 2, DataLayout> t_left(m_size, k_size);
      97    Tensor<float, 2, DataLayout> t_right(k_size, n_size);
      [all …]
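These matches cover Eigen's GPU contraction test: an (m_size x k_size) left tensor is contracted with a (k_size x n_size) right tensor over the shared k dimension. A minimal CPU-side sketch of the same contraction using Eigen's public Tensor API (sizes and names here are illustrative, not the test's own):

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      const int m_size = 4, k_size = 3, n_size = 5;
      Eigen::Tensor<float, 2> t_left(m_size, k_size);
      Eigen::Tensor<float, 2> t_right(k_size, n_size);
      t_left.setRandom();
      t_right.setRandom();

      // Contract dim 1 of the left tensor with dim 0 of the right tensor,
      // i.e. an ordinary (m x k) * (k x n) matrix product over k_size.
      Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
      Eigen::Tensor<float, 2> t_result = t_left.contract(t_right, dims);

      std::cout << t_result.dimension(0) << " x " << t_result.dimension(1)
                << std::endl;  // prints "4 x 5"
    }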
D | cxx11_tensor_scan_gpu.cu
      25  void test_gpu_cumsum(int m_size, int k_size, int n_size)                   (argument in test_gpu_cumsum)
      27    std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
      28    Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
      29    Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
      30    Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
      49    gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
      51    gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
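The scan test exercises a cumulative sum over one axis of a rank-3 tensor. Eigen's Tensor module exposes this as cumsum(axis); a small sketch with illustrative sizes:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      const int m_size = 2, k_size = 3, n_size = 4;
      Eigen::Tensor<float, 3> t_input(m_size, k_size, n_size);
      t_input.setRandom();

      // Running sum along dimension 1 (the k_size axis). Unlike sum(),
      // cumsum keeps the full (m, k, n) shape of the input.
      Eigen::Tensor<float, 3> t_result = t_input.cumsum(1);
      return 0;
    }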
D | cxx11_tensor_contract_sycl.cpp
      37                          IndexType k_size, IndexType n_size) {              (argument in test_sycl_contraction)
      44    Tensor<DataType, 2, DataLayout, IndexType> t_left(m_size, k_size);
      45    Tensor<DataType, 2, DataLayout, IndexType> t_right(k_size, n_size);
      49    Eigen::array<IndexType, 2> left_dims = {{m_size, k_size}};
      50    Eigen::array<IndexType, 2> right_dims = {{k_size, n_size}};
      93    std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size
     155                          IndexType k_size, IndexType n_size) {              (argument in test_no_out_of_bounds)
     159    Tensor<DataType, 2, DataLayout, IndexType> t_left(m_size, k_size);
     160    Tensor<DataType, 2, DataLayout, IndexType> t_right(k_size, n_size);
     164    Eigen::array<IndexType, 2> left_dims = {{m_size, k_size}};
      [all …]
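test_no_out_of_bounds checks that the SYCL contraction never writes outside its output buffer. A common way to verify that property is a guard band: pad the allocation with sentinel values and assert they survive the kernel. A generic sketch of that idea, with run_contraction as a hypothetical stand-in for the kernel under test (this is not the test's actual code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    void check_no_out_of_bounds(std::size_t out_elems,
                                void (*run_contraction)(float*, std::size_t)) {
      const std::size_t guard = 256;                    // sentinel elements per side
      std::vector<float> buf(out_elems + 2 * guard, -1234.0f);

      run_contraction(buf.data() + guard, out_elems);   // kernel writes the middle

      for (std::size_t i = 0; i < guard; ++i) {
        assert(buf[i] == -1234.0f);                     // leading guard untouched
        assert(buf[out_elems + guard + i] == -1234.0f); // trailing guard untouched
      }
    }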
D | cxx11_tensor_scan_sycl.cpp
      27                    IndexType k_size, IndexType n_size, int consume_dim,     (argument in test_sycl_cumsum)
      30    std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size
      32    Tensor<DataType, 3, DataLayout, IndexType> t_input(m_size, k_size, n_size);
      33    Tensor<DataType, 3, DataLayout, IndexType> t_result(m_size, k_size, n_size);
      34    Tensor<DataType, 3, DataLayout, IndexType> t_result_gpu(m_size, k_size,
      46    array<IndexType, 3> tensorRange = {{m_size, k_size, n_size}};
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
D | TensorContractionGpu.h
      24    const Index m_size, const Index n_size, const Index k_size) {            (argument in EigenContractionKernelInternal)
     130    if (!needs_edge_check || lhs_horiz_7 < k_size) { \
     139    } else if (lhs_horiz_6 < k_size) { \
     147    } else if (lhs_horiz_5 < k_size) { \
     154    } else if (lhs_horiz_4 < k_size) { \
     160    } else if (lhs_horiz_3 < k_size) { \
     165    } else if (lhs_horiz_2 < k_size) { \
     169    } else if (lhs_horiz_1 < k_size) { \
     172    } else if (lhs_horiz_0 < k_size) { \
     178    if (!needs_edge_check || rhs_vert < k_size) { \
      [all …]
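The macro cascade on lines 130 through 172 guards an eight-wide load along k: for interior tiles needs_edge_check is false and all eight lanes load unconditionally, while on the final tile each column index lhs_horiz_7 down to lhs_horiz_0 is tested against k_size so only in-range lanes are read; line 178 applies the same guard to the rhs. A simplified scalar sketch of the edge-check pattern (names are illustrative, not the kernel's own):

    // Load up to 8 consecutive lhs values along k, zero-filling past the edge.
    // Interior tiles pass needs_edge_check == false and take the branch-free path.
    template <typename Scalar, typename Index>
    void load_lhs_8(const Scalar* lhs, Index base, Index k_size,
                    bool needs_edge_check, Scalar out[8]) {
      for (int i = 0; i < 8; ++i) {
        const Index k = base + i;
        out[i] = (!needs_edge_check || k < k_size) ? lhs[k] : Scalar(0);
      }
    }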
D | TensorContractionThreadPool.h
    1119      Index m_size, Index n_size, Index k_size,
    1128      k(k_size),
/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/
D | gemm_interleaved_pretransposed_2d.hpp
     466    unsigned int k_size = (current.kmax() - current.k0());                    (local in get_B_pretransposed_array_size)
     472    k_size = iceildiv(k_size, strategy::k_unroll());
     473    k_size *= strategy::k_unroll();
     475    total += x_size * k_size * sizeof(Toi);
     490    unsigned int k_size = (current.kmax() - current.k0());                    (local in pretranspose_B_array)
     496    k_size = iceildiv(k_size, strategy::k_unroll());
     497    k_size *= strategy::k_unroll();
     502    buffer += (x_size * k_size);
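In both functions, k_size starts as the K extent of the current block and is rounded up to a multiple of strategy::k_unroll() before sizing the pretransposed B buffer. The iceildiv-then-multiply pair on lines 472-473 and 496-497 is the standard integer round-up idiom, equivalent to the roundup() helper used by the gemm_hybrid variants below; a self-contained sketch:

    #include <cassert>

    // Integer ceiling division: the smallest q with q * b >= a.
    constexpr unsigned int iceildiv(unsigned int a, unsigned int b) {
      return (a + b - 1) / b;
    }

    // Round a up to the next multiple of b: iceildiv followed by the multiply,
    // the same two-step form seen above.
    constexpr unsigned int roundup(unsigned int a, unsigned int b) {
      return iceildiv(a, b) * b;
    }

    int main() {
      const unsigned int k_unroll = 4;
      unsigned int k_size = 10;            // kmax - k0 for the current block
      k_size = roundup(k_size, k_unroll);
      assert(k_size == 12);                // padded so no pass reads a partial unroll
    }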
D | gemm_hybrid_quantized_inline.hpp
     241    const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());       (local in pretranspose_B_array)
     246    const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;
D | gemm_hybrid.hpp
     232    const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());       (local in pretranspose_B_array)
     237    const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;
D | gemm_hybrid_quantized.hpp
     291    const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());       (local in pretranspose_B_array)
     296    const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;
D | gemm_hybrid_indirect.hpp
     645    unsigned int k_size = kmax - k0;                                          (local in pretranspose_B_array)
     665    unsigned int kleft = k_size;
D | gemm_interleaved.hpp
    1062    unsigned int k_size = (current.kmax() - current.k0());                    (local in pretranspose_B_array)
    1082    unsigned int kleft = k_size;
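In both this file and gemm_hybrid_indirect.hpp above, kleft is initialised to the block's K extent and then consumed strip by strip as B is packed. A sketch of that strip-mining loop under assumed names (pack_strip is a hypothetical helper, not ComputeLibrary code):

    // Walk the K extent in k_unroll-sized strips; the final strip may be short.
    void pack_B_strips(unsigned int k_size, unsigned int k_unroll,
                       void (*pack_strip)(unsigned int k0, unsigned int klen)) {
      unsigned int kleft = k_size;
      unsigned int k0 = 0;
      while (kleft > 0) {
        const unsigned int klen = (kleft < k_unroll) ? kleft : k_unroll;
        pack_strip(k0, klen);
        k0 += klen;
        kleft -= klen;
      }
    }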
/external/XNNPACK/src/
D | operator-run.c
     965    const size_t k_size = context->input_size[3];                             (local in xnn_compute_pad_5d)
     969    if XNN_LIKELY(i - i_padding < i_size && j - j_padding < j_size && k - k_padding < k_size &&
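The condition on line 969 leans on unsigned wraparound: when k < k_padding, the unsigned difference k - k_padding wraps to a huge value, so the single comparison k - k_padding < k_size rejects both indices inside the leading padding and indices past the input. A standalone illustration of the trick:

    #include <cassert>
    #include <cstddef>

    // One unsigned comparison covers both "before the leading pad" and "past
    // the input": if i < pad, (i - pad) wraps around to a value >= size.
    inline bool in_input(std::size_t i, std::size_t pad, std::size_t size) {
      return i - pad < size;
    }

    int main() {
      const std::size_t k_padding = 2, k_size = 5;  // names mirror xnn_compute_pad_5d
      assert(!in_input(0, k_padding, k_size));      // inside leading padding
      assert( in_input(2, k_padding, k_size));      // first real input element
      assert( in_input(6, k_padding, k_size));      // last real input element
      assert(!in_input(7, k_padding, k_size));      // trailing padding
    }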
/external/rust/crates/ash/src/vk/
D | definitions.rs
   35228      pub k_size: u32,                                                       (field)
   35242      k_size: u32::default(),                                                (in default())
   35284      pub fn k_size(mut self, k_size: u32) -> Self {                         (method)
   35285          self.inner.k_size = k_size;
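In the ash bindings, the raw k_size field (the K dimension of a cooperative-matrix multiply) is paired with a builder method of the same name that writes the field and returns Self, so a struct can be filled in one chained expression. A C++ rendering of that fluent-setter shape (illustrative only, not the ash or Vulkan API):

    #include <cstdint>

    struct MatrixMulProperties {
      std::uint32_t m_size = 0;
      std::uint32_t n_size = 0;
      std::uint32_t k_size = 0;

      // Each setter writes one field and returns *this so calls can chain,
      // mirroring the Rust builder's `k_size(mut self, k_size: u32) -> Self`.
      MatrixMulProperties& set_m_size(std::uint32_t v) { m_size = v; return *this; }
      MatrixMulProperties& set_n_size(std::uint32_t v) { n_size = v; return *this; }
      MatrixMulProperties& set_k_size(std::uint32_t v) { k_size = v; return *this; }
    };

    // Usage: auto props = MatrixMulProperties{}.set_m_size(16).set_n_size(16).set_k_size(16);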