/external/tensorflow/tensorflow/python/kernel_tests/linalg/

linear_operator_identity_test.py
     58  num_rows = shape[-1]
     61  num_rows, batch_shape=batch_shape, dtype=dtype)
     62  mat = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
     68  operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
     73  operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
     78  operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
     86  num_rows=2, dtype=dtypes.float16)
     93  linalg_lib.LinearOperatorIdentity(num_rows=[2])
     97  linalg_lib.LinearOperatorIdentity(num_rows=2.)
    101  linalg_lib.LinearOperatorIdentity(num_rows=-2)
    [all …]

linear_operator_zeros_test.py
     68  num_rows = shape[-1]
     71  num_rows, batch_shape=batch_shape, dtype=dtype)
     77  operator = linalg_lib.LinearOperatorZeros(num_rows=2)
     83  operator = linalg_lib.LinearOperatorZeros(num_rows=2)
     88  operator = linalg_lib.LinearOperatorZeros(num_rows=2)
     93  linalg_lib.LinearOperatorZeros(num_rows=[2])
     95  linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=[2])
     99  linalg_lib.LinearOperatorZeros(num_rows=2.)
    101  linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=2.)
    105  linalg_lib.LinearOperatorZeros(num_rows=-2)
    [all …]

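Both test files drive the same construction and validation paths: num_rows must be a non-negative integer scalar, and the operators must agree with a dense matrix built by linalg_ops.eye. A minimal sketch of the public API these snippets exercise (tf.linalg is the public alias for linalg_lib; the shapes and values below are arbitrary):

import tensorflow as tf

# Dense identity with a batch dimension, as in the linalg_ops.eye(...) match above.
mat = tf.eye(2, batch_shape=[3], dtype=tf.float32)      # shape (3, 2, 2)

# Matrix-free operator equivalents of an identity and a zero matrix.
identity = tf.linalg.LinearOperatorIdentity(num_rows=2)
zeros = tf.linalg.LinearOperatorZeros(num_rows=2)

x = tf.random.normal([2, 4])
y = identity.matmul(x)     # returns x unchanged
z = zeros.matmul(x)        # a (2, 4) tensor of zeros

# The invalid constructions the tests check (each raises an error):
#   LinearOperatorIdentity(num_rows=[2])   # not a scalar
#   LinearOperatorIdentity(num_rows=2.)    # not an integer
#   LinearOperatorIdentity(num_rows=-2)    # negative
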
/external/libjpeg-turbo/simd/

jsimd.h
     37  JDIMENSION output_row, int num_rows);
     40  JDIMENSION output_row, int num_rows);
     43  JDIMENSION output_row, int num_rows);
     46  JDIMENSION output_row, int num_rows);
     49  JDIMENSION output_row, int num_rows);
     52  JDIMENSION output_row, int num_rows);
     55  JDIMENSION output_row, int num_rows);
     60  JDIMENSION output_row, int num_rows);
     63  JDIMENSION output_row, int num_rows);
     66  JDIMENSION output_row, int num_rows);
    [all …]

/external/libjpeg-turbo/

jdcolor.c
    255  JDIMENSION input_row, JSAMPARRAY output_buf, int num_rows)   [in ycc_rgb_convert() argument]
    260  num_rows);   [in ycc_rgb_convert()]
    265  num_rows);   [in ycc_rgb_convert()]
    269  num_rows);   [in ycc_rgb_convert()]
    274  num_rows);   [in ycc_rgb_convert()]
    279  num_rows);   [in ycc_rgb_convert()]
    284  num_rows);   [in ycc_rgb_convert()]
    288  num_rows);   [in ycc_rgb_convert()]
    327  JDIMENSION input_row, JSAMPARRAY output_buf, int num_rows)   [in rgb_gray_convert() argument]
    337  while (--num_rows >= 0) {   [in rgb_gray_convert()]
    [all …]

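ycc_rgb_convert() is a dispatcher: based on the output pixel format it forwards input_row, output_buf, and num_rows to one of several per-row conversion loops. As a hedged illustration of the underlying arithmetic only, here is the standard full-range JFIF YCbCr-to-RGB conversion for a block of num_rows rows in NumPy (libjpeg-turbo itself uses precomputed integer tables and SIMD paths, not this float code):

import numpy as np

def ycc_to_rgb_rows(y, cb, cr, num_rows):
    """Convert num_rows rows of full-range JFIF YCbCr samples to RGB.

    y, cb, cr: uint8 arrays of shape (rows, cols). Floating-point version of
    the JFIF equations; the library works row by row with fixed-point tables.
    """
    y = y[:num_rows].astype(np.float32)
    cb = cb[:num_rows].astype(np.float32) - 128.0
    cr = cr[:num_rows].astype(np.float32) - 128.0
    r = y + 1.402 * cr
    g = y - 0.344136 * cb - 0.714136 * cr
    b = y + 1.772 * cb
    rgb = np.stack([r, g, b], axis=-1)
    return np.clip(np.rint(rgb), 0, 255).astype(np.uint8)
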
jdpostct.c
    138  JDIMENSION num_rows, max_rows;   [in post_process_1pass() local]
    145  num_rows = 0;   [in post_process_1pass()]
    147  in_row_groups_avail, post->buffer, &num_rows,   [in post_process_1pass()]
    152  (int)num_rows);   [in post_process_1pass()]
    153  *out_row_ctr += num_rows;   [in post_process_1pass()]
    170  JDIMENSION old_next_row, num_rows;   [in post_process_prepass() local]
    188  num_rows = post->next_row - old_next_row;   [in post_process_prepass()]
    190  (JSAMPARRAY)NULL, (int)num_rows);   [in post_process_prepass()]
    191  *out_row_ctr += num_rows;   [in post_process_prepass()]
    213  JDIMENSION num_rows, max_rows;   [in post_process_2pass() local]
    [all …]

jccolor.c
    235  JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows)   [in rgb_ycc_convert() argument]
    240  num_rows);   [in rgb_ycc_convert()]
    245  num_rows);   [in rgb_ycc_convert()]
    249  num_rows);   [in rgb_ycc_convert()]
    254  num_rows);   [in rgb_ycc_convert()]
    259  num_rows);   [in rgb_ycc_convert()]
    264  num_rows);   [in rgb_ycc_convert()]
    268  num_rows);   [in rgb_ycc_convert()]
    283  JSAMPIMAGE output_buf, JDIMENSION output_row, int num_rows)   [in rgb_gray_convert() argument]
    288  num_rows);   [in rgb_gray_convert()]
    [all …]

/external/tensorflow/tensorflow/python/kernel_tests/

bincount_op_test.py
    213  def _test_bincount_col_count(self, num_rows, num_cols, size, dtype):   [argument]
    215  inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
    218  [np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
    219  axis=0), (num_rows, size))
    226  def _test_bincount_col_binary(self, num_rows, num_cols, size, dtype):   [argument]
    228  inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
    232  for j in range(num_rows)
    234  axis=0), (num_rows, size))
    242  def _test_bincount_col_count_with_weights(self, num_rows, num_cols, size,   [argument]
    245  inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
    [all …]

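These helpers build their expected values with plain NumPy: one np.bincount per row, concatenated and reshaped to (num_rows, size). A small self-contained sketch of that reference construction (the sizes and the final TF call are illustrative assumptions):

import numpy as np

num_rows, num_cols, size = 4, 6, 5
inp = np.random.randint(0, size, (num_rows, num_cols), dtype=np.int32)

# One bincount per row, stacked back into a (num_rows, size) matrix,
# mirroring how the test's expected value is assembled above.
expected = np.reshape(
    np.concatenate(
        [np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
        axis=0),
    (num_rows, size))

# The op under test counts along the last axis; in current TF this is roughly
# tf.math.bincount(inp, minlength=size, axis=-1) (assumption about the exact
# call the test wraps).
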
checkpoint_ops_test.py
    144  num_rows=2,
    159  num_rows=len(row_remapping),
    174  num_rows=len(row_remapping),
    189  num_rows=3,
    201  num_rows = 7
    202  initializing_values = [42] * num_rows * self.old_num_cols
    206  row_remapping=[-1] * num_rows,
    209  num_rows=num_rows,
    213  np.reshape(initializing_values, (num_rows, self.old_num_cols)),
    218  num_rows = 7
    [all …]

fractional_avg_pool_op_test.py
    151  num_rows = 6
    153  tensor_shape = (1, num_rows, num_cols, 1)
    178  for i in range(num_rows):
    200  num_rows = 20
    204  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    240  for num_rows in [10, 20, 50]:
    242  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    256  num_rows = 30
    258  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    277  num_rows = 30
    [all …]

fractional_max_pool_op_test.py
    151  num_rows = 6
    153  tensor_shape = (1, num_rows, num_cols, 1)
    180  for i in range(num_rows):
    202  num_rows = 20
    206  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    218  num_rows = 20
    222  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    237  for num_rows in [10, 20, 50]:
    239  tensor_shape = (num_batches, num_rows, num_cols, num_channels)
    253  num_rows = 30
    [all …]

/external/tensorflow/tensorflow/core/kernels/linalg/

lu_op_gpu.cu.cc
     40  int64 num_rows, const int* __restrict__ pivots,   [in ComputePermutationFromTranspositions() argument]
     43  for (int i = 0; i < num_rows; ++i) {   [in ComputePermutationFromTranspositions()]
     50  for (int i = 0; i < num_rows; ++i) {   [in ComputePermutationFromTranspositions()]
     65  GpuLaunchConfig config, const int64 num_rows,   [in ComputePermutationFromTranspositionsKernel() argument]
     73  num_rows, all_pivots + index * num_rows,   [in ComputePermutationFromTranspositionsKernel()]
     74  all_permutation_indices + index * num_rows);   [in ComputePermutationFromTranspositionsKernel()]
     94  const int64 num_rows = input.dim_size(input_rank - 2);   [in ComputeAsync() local]
     98  context, num_rows == num_cols,   [in ComputeAsync()]
     99  errors::InvalidArgument("Input matrices must be squares, got", num_rows,   [in ComputeAsync()]
    108  permutation_indices_shape.AddDim(num_rows);   [in ComputeAsync()]
    [all …]

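ComputePermutationFromTranspositions expands the pivot array produced by the batched LU factorization (a sequence of row transpositions) into an explicit permutation vector of length num_rows for each matrix in the batch. A hedged NumPy sketch of that expansion, assuming 0-based pivot indices (the CUDA kernel works from the solver's own pivot convention):

import numpy as np

def permutation_from_transpositions(num_rows, pivots):
    """Expand LU transpositions into an explicit permutation vector.

    pivots[i] == j means "row i was swapped with row j" during factorization
    (0-based here for simplicity). Applying the swaps in order to the identity
    permutation yields the row permutation recorded by the factorization.
    """
    perm = np.arange(num_rows)
    for i in range(num_rows):
        j = pivots[i]
        perm[i], perm[j] = perm[j], perm[i]
    return perm

# Example: row 0 was swapped with row 2; rows 1 and 2 were left in place.
print(permutation_from_transpositions(3, [2, 1, 2]))   # -> [2 1 0]
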
matrix_diag_op_gpu.cu.cc
     31  const int num_rows,   [in ComputeContentOffset() argument]
     40  const int diag_len = min(num_rows + y_offset, num_cols - x_offset);   [in ComputeContentOffset()]
     46  const int num_threads, const int num_rows, const int num_cols,   [in MatrixDiagKernel() argument]
     54  const int batch = batch_and_row_index / num_rows;   [in MatrixDiagKernel()]
     55  const int row = batch_and_row_index - batch * num_rows;   [in MatrixDiagKernel()]
     59  ComputeContentOffset(diag_index, max_diag_len, num_rows, num_cols,   [in MatrixDiagKernel()]
     83  const int num_rows = output.dimension(1);   [in Compute() local]
     86  if (batch_size == 0 || max_diag_len == 0 || num_rows == 0 ||   [in Compute()]
     91  GetGpuLaunchConfig(batch_size * num_rows * num_cols, device);   [in Compute()]
     94  device.stream(), config.virtual_thread_count, num_rows, num_cols,   [in Compute()]
    [all …]

lu_op.cc
     59  double num_rows = static_cast<double>(input_matrix_shape.dim_size(0));   [in GetCostPerUnit() local]
     60  double cost = (2 / 3.0) * MathUtil::IPow(num_rows, 3);   [in GetCostPerUnit()]
     84  const int64_t num_rows = input.dim_size(input_rank - 2);   [in Compute() local]
     87  input_matrix_shape.AppendShape({num_rows, num_cols});   [in Compute()]
     94  permutation_shape.AddDim(num_rows);   [in Compute()]
    110  if (num_rows == 0) {   [in Compute()]
    115  auto shard = [this, &input, &num_rows, &num_cols, &outputs,   [in Compute()]
    118  ComputeTensorSlice(context, i, input, num_rows, num_cols, outputs,   [in Compute()]
    129  const Tensor& input, int64_t num_rows,   [in ComputeTensorSlice() argument]
    135  input.flat<Scalar>().data() + matrix_index * num_rows * num_cols,   [in ComputeTensorSlice()]
    [all …]

matrix_diag_op.cc
    107  const Eigen::Index num_rows = input_shape.dim_size(rank - 2);   [in Compute() local]
    111  (-num_rows < lower_diag_index && lower_diag_index < num_cols) ||   [in Compute()]
    115  ". It must be between ", -num_rows, " and ", num_cols));   [in Compute()]
    117  (-num_rows < upper_diag_index && upper_diag_index < num_cols) ||   [in Compute()]
    121  " It must be between ", -num_rows, " and ", num_cols));   [in Compute()]
    135  std::min(num_rows + std::min(upper_diag_index, 0),   [in Compute()]
    175  int32_t num_rows = -1;   [in Compute() local]
    208  num_rows = num_rows_tensor.flat<int32>()(0);   [in Compute()]
    245  OP_REQUIRES(context, num_rows == -1 || num_rows >= min_num_rows,   [in Compute()]
    252  if (num_rows == -1 && num_cols == -1) {   [in Compute()]
    [all …]

/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

matrix_diag_ops.cc
     30  static inline int ComputeDiagLen(int diag_index, int num_rows, int num_cols) {   [in ComputeDiagLen() argument]
     31  return std::min(num_rows + std::min(0, diag_index),   [in ComputeDiagLen()]
     95  const int64_t num_rows,   [in ValidateDiagIndexWithOutputMatrixSize() argument]
     99  (-num_rows < lower_diag_index && lower_diag_index < num_cols) ||   [in ValidateDiagIndexWithOutputMatrixSize()]
    103  " It must be between ", -num_rows, " and ", num_cols));   [in ValidateDiagIndexWithOutputMatrixSize()]
    105  (-num_rows < upper_diag_index && upper_diag_index < num_cols) ||   [in ValidateDiagIndexWithOutputMatrixSize()]
    109  " It must be between ", -num_rows, " and ", num_cols));   [in ValidateDiagIndexWithOutputMatrixSize()]
    122  const int64_t max_diag_len, const int64_t num_rows,   [in SetMatrixDiag() argument]
    192  if (num_cols - num_rows <= diag_index && diag_index <= 0) {   [in SetMatrixDiag()]
    194  } else if (0 <= diag_index && diag_index <= num_cols - num_rows) {   [in SetMatrixDiag()]
    [all …]

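The CPU kernel above and this tf2xla lowering share the same diagonal bookkeeping: a diagonal index k is valid only if -num_rows < k < num_cols, and its length is min(num_rows + min(0, k), num_cols - max(0, k)). A short NumPy check of that formula against np.diagonal (matrix size chosen arbitrarily):

import numpy as np

def diag_len(k, num_rows, num_cols):
    # Mirrors ComputeDiagLen: k < 0 indexes subdiagonals, k > 0 superdiagonals.
    return min(num_rows + min(0, k), num_cols - max(0, k))

num_rows, num_cols = 3, 5
a = np.arange(num_rows * num_cols).reshape(num_rows, num_cols)
for k in range(-num_rows + 1, num_cols):      # every valid diagonal index
    assert diag_len(k, num_rows, num_cols) == np.diagonal(a, offset=k).size
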
/external/tensorflow/tensorflow/core/kernels/sparse/

sparse_cholesky_op.cc
     85  int64_t num_rows;   [in Compute() local]
     88  &batch_size, &num_rows));   [in Compute()]
    106  (input_matrix->total_nnz() / batch_size) / num_rows;   [in Compute()]
    108  nnz_per_row * nnz_per_row * num_rows;   [in Compute()]
    120  num_rows, num_rows, input_matrix->nnz(batch_index),   [in Compute()]
    135  permutation(permutation_indices_flat + batch_index * num_rows,   [in Compute()]
    136  num_rows);   [in Compute()]
    178  TensorShape({(num_rows + 1) * batch_size}));   [in Compute()]
    201  cholesky_factor.outerIndexPtr() + num_rows + 1,   [in Compute()]
    202  output_row_ptr_ptr + batch_index * (num_rows + 1));   [in Compute()]
    [all …]

sparse_ordering_amd_op.cc
     78  const int64_t num_rows = dense_shape_vec((rank == 2) ? 0 : 1);   [in Compute() local]
     81  OP_REQUIRES(ctx, num_rows == num_cols,   [in Compute()]
     83  num_rows, " != ", num_cols));   [in Compute()]
     88  (rank == 2) ? TensorShape{num_rows} : TensorShape{batch_size, num_rows};   [in Compute()]
     96  10 * num_rows * (input_matrix->total_nnz() / batch_size);   [in Compute()]
    107  num_rows, num_rows, input_matrix->nnz(batch_index),   [in Compute()]
    120  permutation_indices.flat<int>().data() + batch_index * num_rows,   [in Compute()]
    121  num_rows, 1);   [in Compute()]

kernels.cc
     31  const int64_t batch_size, const int num_rows,   [in operator ()() argument]
     40  if (csr_row_ptr.size() != batch_size * (num_rows + 1)) {   [in operator ()()]
     43  csr_row_ptr.size(), " vs. ", batch_size * (num_rows + 1));   [in operator ()()]
     79  csr_row_ptr(cur_batch * (num_rows + 1) + indices(i, 1) + 1) += 1;   [in operator ()()]
     98  auto* row_ptr_batch = csr_row_ptr.data() + batch_idx * (num_rows + 1);   [in operator ()()]
     99  std::partial_sum(row_ptr_batch, row_ptr_batch + num_rows + 1,   [in operator ()()]

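This functor converts COO indices into CSR row pointers in the classic two steps visible above: bump csr_row_ptr[row + 1] for every nonzero, then run a prefix sum (std::partial_sum) over the batch's num_rows + 1 entries. A minimal single-batch NumPy sketch of the same idea (the toy indices are made up):

import numpy as np

def coo_rows_to_csr_row_ptr(row_indices, num_rows):
    """Build the CSR row-pointer array of length num_rows + 1.

    Step 1: histogram each nonzero into csr_row_ptr[row + 1].
    Step 2: prefix-sum, so csr_row_ptr[r]..csr_row_ptr[r + 1] delimits row r,
    matching the std::partial_sum call in the snippet above.
    """
    csr_row_ptr = np.zeros(num_rows + 1, dtype=np.int64)
    for r in row_indices:
        csr_row_ptr[r + 1] += 1
    return np.cumsum(csr_row_ptr)

# Nonzeros in rows 0, 0, 2 of a 4-row matrix -> row_ptr [0, 2, 2, 3, 3].
print(coo_rows_to_csr_row_ptr([0, 0, 2], num_rows=4))
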
/external/tensorflow/tensorflow/python/ops/

linalg_ops_impl.py
     33  def eye(num_rows,   [argument]
     43  name, default_name='eye', values=[num_rows, num_columns, batch_shape]):
     46  num_columns = num_rows if num_columns is None else num_columns
     49  if (isinstance(num_rows, ops.Tensor) or
     51  diag_size = math_ops.minimum(num_rows, num_columns)
     54  if not isinstance(num_rows, compat.integral_types) or not isinstance(
     58  is_square = num_rows == num_columns
     59  diag_size = np.minimum(num_rows, num_columns)
     67  shape = array_ops.concat((batch_shape, [num_rows, num_columns]), axis=0)
     73  shape = batch_shape + [num_rows, num_columns]

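linalg_ops_impl.eye is the implementation behind tf.eye: num_columns defaults to num_rows, diag_size = min(num_rows, num_columns) is computed statically or dynamically depending on whether the sizes are Python ints or tensors, and the result is padded out to [*batch_shape, num_rows, num_columns]. A rough NumPy equivalent of the static-shape path (a sketch, not the real implementation):

import numpy as np

def eye_like_tf(num_rows, num_columns=None, batch_shape=(), dtype=np.float32):
    """NumPy sketch of tf.eye's static-shape path."""
    num_columns = num_rows if num_columns is None else num_columns
    diag_size = np.minimum(num_rows, num_columns)
    # Rectangular "identity": ones on the main diagonal, zeros elsewhere.
    mat = np.zeros((num_rows, num_columns), dtype=dtype)
    mat[np.arange(diag_size), np.arange(diag_size)] = 1
    # Tile the same matrix across the requested batch dimensions.
    return np.broadcast_to(mat, tuple(batch_shape) + (num_rows, num_columns)).copy()

print(eye_like_tf(2, 3, batch_shape=[4]).shape)   # (4, 2, 3)
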
bincount_ops_test.py
    525  num_rows = 128
    528  inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
    532  [num_rows, 1])
    545  num_rows = 128
    548  inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
    552  [num_rows, 1])
    555  [num_rows, 1])
    570  num_rows = 128
    573  inp_indices = np.random.randint(0, num_rows, (n_elems, 1))
    577  [num_rows, 1])
    [all …]

/external/libhevc/decoder/

ihevcd_fmt_conv.c
    393  WORD32 num_rows, num_cols, src_strd, dst_strd;   [in ihevcd_fmt_conv_420sp_to_420sp() local]
    400  num_rows = ht;   [in ihevcd_fmt_conv_420sp_to_420sp()]
    406  for(i = 0; i < num_rows; i++)   [in ihevcd_fmt_conv_420sp_to_420sp()]
    417  num_rows = ht >> 1;   [in ihevcd_fmt_conv_420sp_to_420sp()]
    423  for(i = 0; i < num_rows; i++)   [in ihevcd_fmt_conv_420sp_to_420sp()]
    492  WORD32 num_rows, num_cols, src_strd, dst_strd;   [in ihevcd_fmt_conv_420sp_to_420sp_swap_uv() local]
    499  num_rows = ht;   [in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()]
    505  for(i = 0; i < num_rows; i++)   [in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()]
    516  num_rows = ht >> 1;   [in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()]
    522  for(i = 0; i < num_rows; i++)   [in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()]
    [all …]

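Both conversion routines here (and the near-identical ones in libavc's ih264e_fmt_conv.c at the end of this listing) copy a 4:2:0 semi-planar frame row by row: ht rows of luma, then ht >> 1 rows of interleaved chroma, with independent source and destination strides. A hedged Python sketch of that loop structure (buffer names and types are illustrative; buffers are flat byte arrays such as bytearray or 1-D NumPy uint8 arrays):

def copy_420sp(src_y, src_uv, dst_y, dst_uv, wd, ht, src_strd, dst_strd):
    """Copy a 4:2:0 semi-planar (NV12-style) frame between strided buffers."""
    num_rows, num_cols = ht, wd
    for i in range(num_rows):                      # luma plane, full height
        dst_y[i * dst_strd:i * dst_strd + num_cols] = \
            src_y[i * src_strd:i * src_strd + num_cols]
    num_rows = ht >> 1                             # interleaved CbCr plane, half height
    for i in range(num_rows):
        dst_uv[i * dst_strd:i * dst_strd + num_cols] = \
            src_uv[i * src_strd:i * src_strd + num_cols]
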
/external/ComputeLibrary/src/core/NEON/kernels/

NEWinogradConvolutionLayerKernel.h
     64  …virtual unsigned int get_input_storage_size(int num_batches, int num_channels, int num_rows, int n…
     76  …virtual int get_matrix_stride(int num_batches, int num_channels, int num_rows, int num_cols, bool …
     90  …virtual void configure(const ITensor *input_nhwc, const int num_batches, const int num_rows, const…
    129  int num_rows,
    158  int num_rows,
    185  const int num_rows,
    256  …virtual unsigned int get_output_storage_size(int num_batches, int num_rows, int num_cols, int num_…
    267  …virtual int get_matrix_stride(int num_batches, int num_rows, int num_cols, int num_output_channels…
    278  int num_rows, /* Number of rows in each feature map of the input tensor. */
    302  const int num_rows,
    [all …]

/external/tensorflow/tensorflow/compiler/xla/service/gpu/

gemm_thunk.cc
     76  int64 num_rows;   [member]
     81  return transpose == se::blas::Transpose::kTranspose ? num_rows : num_cols;   [in reduced_dim()]
    128  lhs.transpose, rhs.transpose, output_matrix.num_rows,   [in DoGemmWithAlgorithm()]
    132  /*leading dim of LHS=*/lhs.num_rows, rhs.cast<Input>(),   [in DoGemmWithAlgorithm()]
    133  /*leading dim of RHS=*/rhs.num_rows, rhs.stride,   [in DoGemmWithAlgorithm()]
    135  /*leading dim of output=*/output_matrix.num_rows, output_matrix.stride,   [in DoGemmWithAlgorithm()]
    139  lhs.transpose, rhs.transpose, output_matrix.num_rows,   [in DoGemmWithAlgorithm()]
    143  /*lda=*/lhs.num_rows, rhs.cast<Input>(),   [in DoGemmWithAlgorithm()]
    144  /*ldb=*/rhs.num_rows,   [in DoGemmWithAlgorithm()]
    146  /*ldc=*/output_matrix.num_rows, computation_type, algorithm,   [in DoGemmWithAlgorithm()]
    [all …]

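The MatrixDescriptor in gemm_thunk.cc records each operand's num_rows/num_cols as stored in column-major memory, which is why num_rows doubles as the leading dimension (lda/ldb/ldc) passed to the BLAS call, and reduced_dim() returns whichever dimension is contracted away: the rows when the operand is transposed in the product, the columns otherwise. A tiny Python mirror of that helper (names and the bool flag are illustrative; the real code uses se::blas::Transpose):

from dataclasses import dataclass

@dataclass
class MatrixDescriptor:
    num_rows: int      # rows as stored; also the leading dimension in column-major layout
    num_cols: int
    transpose: bool    # is this operand transposed in the GEMM?

    def reduced_dim(self) -> int:
        """Size of the dimension contracted away by the matmul (mirrors line 81 above)."""
        return self.num_rows if self.transpose else self.num_cols

# A 64x128 operand that appears transposed in the product contributes its
# 64 rows to the contraction; untransposed, it would contribute its 128 columns.
a = MatrixDescriptor(num_rows=64, num_cols=128, transpose=True)
assert a.reduced_dim() == 64
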
/external/webp/src/dec/

alpha_dec.c
    110  static int ALPHDecode(VP8Decoder* const dec, int row, int num_rows) {   [in ALPHDecode() argument]
    122  for (y = 0; y < num_rows; ++y) {   [in ALPHDecode()]
    129  for (y = 0; y < num_rows; ++y) {   [in ALPHDecode()]
    139  if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) {   [in ALPHDecode()]
    144  if (row + num_rows >= height) {   [in ALPHDecode()]
    179  int row, int num_rows) {   [in VP8DecompressAlphaRows() argument]
    185  if (row < 0 || num_rows <= 0 || row + num_rows > height) {   [in VP8DecompressAlphaRows()]
    202  num_rows = height - row;  // decode everything in one pass   [in VP8DecompressAlphaRows()]
    207  assert(row + num_rows <= height);   [in VP8DecompressAlphaRows()]
    208  if (!ALPHDecode(dec, row, num_rows)) goto Error;   [in VP8DecompressAlphaRows()]

/external/libavc/encoder/

ih264e_fmt_conv.c
    371  WORD32 num_rows, num_cols, src_strd, dst_strd;   [in ih264e_fmt_conv_420sp_to_420sp() local]
    378  num_rows = ht;   [in ih264e_fmt_conv_420sp_to_420sp()]
    384  for (i = 0; i < num_rows; i++)   [in ih264e_fmt_conv_420sp_to_420sp()]
    395  num_rows = ht >> 1;   [in ih264e_fmt_conv_420sp_to_420sp()]
    401  for (i = 0; i < num_rows; i++)   [in ih264e_fmt_conv_420sp_to_420sp()]
    423  WORD32 num_rows, num_cols, src_strd, dst_strd;   [in ih264e_fmt_conv_420sp_to_420sp_swap_uv() local]
    430  num_rows = ht;   [in ih264e_fmt_conv_420sp_to_420sp_swap_uv()]
    436  for (i = 0; i < num_rows; i++)   [in ih264e_fmt_conv_420sp_to_420sp_swap_uv()]
    447  num_rows = ht >> 1;   [in ih264e_fmt_conv_420sp_to_420sp_swap_uv()]
    453  for (i = 0; i < num_rows; i++)   [in ih264e_fmt_conv_420sp_to_420sp_swap_uv()]
    [all …]