/external/tensorflow/tensorflow/contrib/factorization/python/kernel_tests/ |
D | masked_matmul_benchmark.py |
    40   def _make_sparse_mask(self, mask_shape, nnz, sort=False):   argument
    54     [nnz], minval=0, maxval=num_rows, dtype=dtypes.int64)
    56     [nnz], minval=0, maxval=num_cols, dtype=dtypes.int64)
    58     values = array_ops.ones([nnz])
    62   def _run_graph(self, a_shape, b_shape, nnz, num_iters, sort=False,   argument
    87     mask_indices_ph = array_ops.placeholder(dtypes.int64, shape=[nnz, 2])
    90     mask = self._make_sparse_mask(mask_shape, nnz, sort)
    113    nnz=nnz,
    132    nnz = 100000
    139    self._run_graph(a_shape, b_shape, nnz, num_iters, sort, transpose_a,
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_SparseAddGrad.pbtxt |
    6    1-D with shape `[nnz(sum)]`. The gradient with respect to
    13   2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
    19   2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
    26   `[nnz(sum), ndims]`.
    32   1-D with shape `[nnz(A)]`. The gradient with respect to the
    39   1-D with shape `[nnz(B)]`. The gradient with respect to the
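These shapes describe a gather: the incoming gradient carries one value per nonzero of the sum, and each operand recovers the gradient for its own values by looking up its indices in the sum's index list; indices of A or B that did not survive into the sum get a zero gradient. A minimal C++ sketch of that lookup (container choices and names are mine, not the TensorFlow kernel):

    #include <cstdint>
    #include <map>
    #include <vector>

    // backprop_val_grad: one value per nonzero of the sum, shape [nnz(sum)].
    // a_indices / sum_indices: index tuples, shapes [nnz(A), ndims] and [nnz(sum), ndims].
    // Returns the gradient w.r.t. A's values, shape [nnz(A)].
    std::vector<float> SparseAddGradForA(
        const std::vector<std::vector<int64_t>>& a_indices,
        const std::vector<std::vector<int64_t>>& sum_indices,
        const std::vector<float>& backprop_val_grad) {
      std::map<std::vector<int64_t>, float> grad_at;  // sum index tuple -> gradient entry
      for (size_t i = 0; i < sum_indices.size(); ++i)
        grad_at[sum_indices[i]] = backprop_val_grad[i];

      std::vector<float> a_val_grad(a_indices.size(), 0.0f);
      for (size_t i = 0; i < a_indices.size(); ++i) {
        auto it = grad_at.find(a_indices[i]);  // missing => entry was dropped from the sum
        if (it != grad_at.end()) a_val_grad[i] = it->second;
      }
      return a_val_grad;
    }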
|
D | api_def_SparseAdd.pbtxt |
    6    2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
    12   1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.
    24   2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
    30   1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector.
    60   In the following shapes, `nnz` is the count after taking `thresh` into account.
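Line 60 is the key constraint: the output's nnz is only known after the two operands are merged and `thresh` is applied, so matching index tuples are summed first and small-magnitude results dropped. A rough C++ sketch of that merge, assuming a plain ordered-map COO representation rather than TensorFlow's data structures:

    #include <cmath>
    #include <cstdint>
    #include <map>
    #include <vector>

    // Hypothetical COO container: sorted index tuple -> value.
    using Coo = std::map<std::vector<int64_t>, float>;

    // Sum two sparse tensors; results whose magnitude falls below thresh are
    // dropped, which is why nnz of the output depends on thresh.
    Coo SparseAddSketch(const Coo& a, const Coo& b, float thresh) {
      Coo merged = a;
      for (const auto& kv : b) merged[kv.first] += kv.second;

      Coo out;
      for (const auto& kv : merged)
        if (std::abs(kv.second) >= thresh) out.insert(kv);
      return out;
    }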
|
D | api_def_SparseTensorDenseAdd.pbtxt |
    6    2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
    12   1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
|
D | api_def_SparseTensorDenseMatMul.pbtxt |
    6    2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
    12   1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.
|
/external/eigen/Eigen/src/SparseCore/ |
D | SparseMap.h |
    124  inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer i…
    126  (IsRowMajor?cols:rows), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(out…
    131  inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr)
    132  : m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_o…
    195  inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex*…
    197  : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
    201  inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr)
    202  : Base(size, nnz, innerIndexPtr, valuePtr)
    245  inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
    247  : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
    [all …]
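These constructors view existing compressed-storage buffers as a sparse matrix without copying; nnz is simply the length of the inner-index and value arrays. A minimal usage sketch with the public Map API (the buffer contents are illustrative):

    #include <Eigen/SparseCore>

    int main() {
      // A 3x3 identity in compressed column storage: nnz == 3.
      int    outerIndex[] = {0, 1, 2, 3};     // column starts, size cols + 1
      int    innerIndex[] = {0, 1, 2};        // row of each nonzero, size nnz
      double values[]     = {1.0, 1.0, 1.0};  // size nnz

      // Wrap the raw buffers; no allocation or copy happens here.
      Eigen::Map<Eigen::SparseMatrix<double> > sm(3, 3, 3, outerIndex, innerIndex, values);
      Eigen::VectorXd y = sm * Eigen::VectorXd::Ones(3);
      return y.sum() == 3.0 ? 0 : 1;
    }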
|
D | SparseBlock.h |
    46   Index nnz = 0;   in nonZeros() local
    50   ++nnz;   in nonZeros()
    51   return nnz;   in nonZeros()
    132  Index nnz = tmp.nonZeros();
    145  if(nnz>free_size)
    148  … typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
    153  …internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newd…
    154  …internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newd…
    156  …matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
    157  …matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
    [all …]
|
D | ConservativeSparseSparseProduct.h |
    51   Index nnz = 0;   variable
    64   indices[nnz] = i;
    65   ++nnz;
    74   for(Index k=0; k<nnz; ++k)
    93   if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
    95   if(nnz>1) std::sort(indices,indices+nnz);
    96   for(Index k=0; k<nnz; ++k)
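The snippet is the inner loop of a sparse × sparse product: values are accumulated into a dense column buffer, the rows touched are remembered in indices, and nnz then drives the heuristic on line 93 that decides whether sorting the result column is worth it. A standalone sketch of the same accumulate-then-sort idea (not Eigen's implementation):

    #include <algorithm>
    #include <utility>
    #include <vector>

    // One output column of C = A * B: A is CSC, b_col holds the nonzeros of one
    // column of B as (row, value) pairs.
    void SparseColumnProduct(int rows,
                             const std::vector<int>& A_outer,          // size cols + 1
                             const std::vector<int>& A_inner,          // size nnz(A)
                             const std::vector<double>& A_vals,        // size nnz(A)
                             const std::vector<std::pair<int, double>>& b_col,
                             std::vector<int>& out_rows,
                             std::vector<double>& out_vals) {
      std::vector<double> acc(rows, 0.0);
      std::vector<char> touched(rows, 0);
      std::vector<int> indices;
      for (const auto& kv : b_col) {                      // each nonzero b(k)
        const int k = kv.first;
        for (int p = A_outer[k]; p < A_outer[k + 1]; ++p) {
          const int i = A_inner[p];
          if (!touched[i]) { touched[i] = 1; indices.push_back(i); }
          acc[i] += A_vals[p] * kv.second;                // += A(i,k) * b(k)
        }
      }
      const int nnz = static_cast<int>(indices.size());
      if (nnz > 1) std::sort(indices.begin(), indices.end());  // the "small nnz" branch
      for (int k = 0; k < nnz; ++k) {
        out_rows.push_back(indices[k]);
        out_vals.push_back(acc[indices[k]]);
      }
    }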
|
D | MappedSparseMatrix.h |
    42   inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageI…
    43   : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
|
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_conditional_accumulator.h |
    164  const int64 nnz = grad_idx->dim_size(0);   in AllocateAndAssignToAccumGradFunction() local
    169  accum_idx_vec_->reserve(nnz);   in AllocateAndAssignToAccumGradFunction()
    170  for (int i = 0; i < nnz; i++) {   in AllocateAndAssignToAccumGradFunction()
    186  count_element_ = new std::vector<int>(nnz, 1);   in AllocateAndAssignToAccumGradFunction()
    323  const int64 nnz = count_element_->size();   in DivideAccumGradByCounter() local
    340  for (int64 i = 0; i < nnz; i++) {   in DivideAccumGradByCounter()
    387  const int64 nnz = grad_idx_tensor->dim_size(0);   in GetAndValidateTensorInputForApplyGrad() local
    391  OP_REQUIRES_BOOLEAN(ctx, grad_val_tensor->dim_size(0) == nnz,   in GetAndValidateTensorInputForApplyGrad()
    392  errors::InvalidArgument("Expected ", nnz,   in GetAndValidateTensorInputForApplyGrad()
    424  const int64 nnz = accum_idx_vec_->size();   in ReturnIdxTensor() local
    [all …]
|
D | sparse_tensor_dense_matmul_op_gpu.cu.cc |
    31   __global__ void SparseTensorDenseMatMulKernel(int nnz, int m, int b_rows,   in SparseTensorDenseMatMulKernel() argument
    39   CUDA_1D_KERNEL_LOOP(index, nnz * p) {   in SparseTensorDenseMatMulKernel()
    73   int nnz = a_values.size();   in Compute() local
    82   CudaLaunchConfig config = GetCudaLaunchConfig(p * nnz, d);   in Compute()
    86   config.block_count, config.thread_per_block, 0, d.stream(), nnz, m,   in Compute()
|
D | sparse_tensor_dense_matmul_op_test.cc |
    42   static Graph* SparseTensorDenseMatmul(int nnz, int m, int k, int n,   in SparseTensorDenseMatmul() argument
    45   Tensor a_values(DT_FLOAT, TensorShape({nnz}));   in SparseTensorDenseMatmul()
    46   Tensor a_indices(DT_INT64, TensorShape({nnz, 2}));   in SparseTensorDenseMatmul()
    57   for (int32 i = 0; i < nnz; ++i) {   in SparseTensorDenseMatmul()
|
D | sparse_dense_binary_op_shared.cc |
    106  const int64 nnz = indices_t->dim_size(0);   in Compute() local
    108  ctx->allocate_output(0, TensorShape({nnz}), &output_values));   in Compute()
    110  ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),   in Compute()
    129  for (int i = 0; i < nnz; ++i) { \   in Compute()
|
D | sparse_tensor_dense_matmul_op.cc |
    68   const int64 nnz = a_indices->shape().dim_size(0);   in Compute() local
    69   OP_REQUIRES(ctx, nnz == a_values->NumElements(),   in Compute()
    111  OP_REQUIRES(ctx, FastBoundsCheck(nnz * outer_right, int32max),   in Compute()
    251  const std::size_t nnz = a_values.size();   in Compute() local
    267  for (std::size_t i = 0; i < nnz; ++i) {   in Compute()
    287  for (std::size_t i = 0; i < nnz; ++i) { \   in Compute()
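The checks tie nnz to both sparse inputs (a_indices is [nnz, 2], a_values is [nnz]), and the main loop then performs one rank-1 scatter per nonzero into the output. A scalar C++ sketch of that loop, not the templated, bounds-checked TensorFlow kernel:

    #include <cstdint>
    #include <vector>

    // out (m x n) += A (m x k sparse, COO with nnz entries) * B (k x n dense).
    void SparseDenseMatmulSketch(int64_t nnz, int n,
                                 const std::vector<int64_t>& a_rows,  // size nnz
                                 const std::vector<int64_t>& a_cols,  // size nnz
                                 const std::vector<float>& a_vals,    // size nnz
                                 const std::vector<std::vector<float>>& B,
                                 std::vector<std::vector<float>>& out) {
      for (int64_t i = 0; i < nnz; ++i) {
        const int64_t m_idx = a_rows[i];  // output row to update
        const int64_t k_idx = a_cols[i];  // row of B scaled by this nonzero
        for (int j = 0; j < n; ++j)
          out[m_idx][j] += a_vals[i] * B[k_idx][j];
      }
    }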
|
D | reshape_util.cc |
    54   const int64 nnz = input_indices_in.shape().dim_size(0);   in Reshape() local
    128  TensorShape({nnz, output_rank}),   in Reshape()
    132  for (int i = 0; i < nnz; ++i) {   in Reshape()
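Reshaping a sparse tensor touches only its [nnz, rank] index matrix: each of the nnz indices is linearized against the input shape and re-expanded against the output shape, while the values are reused unchanged. A sketch of that per-index rewrite, assuming row-major order:

    #include <cstdint>
    #include <vector>

    // Map one index tuple from input_shape coordinates to output_shape coordinates
    // via its flat position; both shapes must describe the same number of elements.
    std::vector<int64_t> ReshapeSparseIndex(const std::vector<int64_t>& idx,
                                            const std::vector<int64_t>& input_shape,
                                            const std::vector<int64_t>& output_shape) {
      int64_t flat = 0;
      for (size_t d = 0; d < input_shape.size(); ++d)
        flat = flat * input_shape[d] + idx[d];

      std::vector<int64_t> out(output_shape.size());
      for (size_t d = output_shape.size(); d-- > 0;) {
        out[d] = flat % output_shape[d];
        flat /= output_shape[d];
      }
      return out;
    }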
|
D | sparse_reduce_op.cc |
    274  int64 nnz = 0;   in Compute() local
    277  nnz++;   in Compute()
    283  0, TensorShape({nnz, reduction.reduced_shape.dims()}),   in Compute()
    292  ctx->allocate_output(1, TensorShape({nnz}), &out_values_t));   in Compute()
|
/external/libavc/common/arm/ |
D | ih264_resi_trans_quant_a9.s |
    61   @ pointer to store nnz
    92   @ :nnz
    108  @R9 :nnz
    227  vpadd.u8 d18, d16, d17   @I pair add nnz 1
    228  vpadd.u8 d20, d18, d19   @I Pair add nnz 2
    229  vpadd.u8 d22, d20, d21   @I Pair add nnz 3
    233  vmov.u8 d25, #16   @I Get max nnz
    234  vsub.u8 d26, d25, d24   @I invert current nnz
    236  vst1.u8 d26[0], [r9]   @I Write nnz
    259  @ pointer to store nnz
    [all …]
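In this encoder, nnz is the number of non-zero quantized coefficients in a 4x4 block, which CAVLC needs later; the NEON sequence counts zero coefficients with pair-adds and subtracts the count from 16 before storing it through r9. A scalar reference for what the listing computes, as I read it (not the assembly itself):

    // Non-zero coefficient count of one 4x4 block of quantized residuals.
    int CountNnz4x4(const short coeff[16]) {
      int zero_count = 0;
      for (int i = 0; i < 16; ++i)
        zero_count += (coeff[i] == 0);  // the compare + vpadd chain above
      return 16 - zero_count;           // "Get max nnz" / "invert current nnz"
    }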
|
/external/eigen/bench/ |
D | sparse_setter.cpp |
    302  const int nnz,   in coo_tocsr() argument
    312  for (int n = 0; n < nnz; n++){   in coo_tocsr()
    322  Bp[n_row] = nnz;   in coo_tocsr()
    325  for(int n = 0; n < nnz; n++){   in coo_tocsr()
    384  I nnz = 0;   in csr_sum_duplicates() local
    397  Aj[nnz] = j;   in csr_sum_duplicates()
    398  Ax[nnz] = x;   in csr_sum_duplicates()
    399  nnz++;   in csr_sum_duplicates()
    401  Ap[i+1] = nnz;   in csr_sum_duplicates()
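coo_tocsr is the classic counting-sort conversion: count the entries in each row, prefix-sum those counts into row pointers, then scatter the nnz triplets into place; csr_sum_duplicates afterwards compacts repeated (row, column) entries, shrinking nnz. A standalone sketch of the conversion step:

    #include <vector>

    // Convert COO triplets (Ai[n], Aj[n], Ax[n]), n < nnz, into CSR arrays:
    // Bp = row pointers (size n_row + 1), Bj = column indices, Bx = values.
    void CooToCsr(int n_row, int nnz,
                  const std::vector<int>& Ai, const std::vector<int>& Aj,
                  const std::vector<double>& Ax,
                  std::vector<int>& Bp, std::vector<int>& Bj, std::vector<double>& Bx) {
      Bp.assign(n_row + 1, 0);
      Bj.resize(nnz);
      Bx.resize(nnz);

      for (int n = 0; n < nnz; ++n) Bp[Ai[n]]++;       // per-row counts

      int cumsum = 0;                                   // exclusive prefix sum
      for (int i = 0; i < n_row; ++i) {
        const int tmp = Bp[i];
        Bp[i] = cumsum;
        cumsum += tmp;
      }
      Bp[n_row] = nnz;

      std::vector<int> next(Bp.begin(), Bp.end() - 1);  // next free slot per row
      for (int n = 0; n < nnz; ++n) {                   // scatter; duplicates are kept
        const int dest = next[Ai[n]]++;
        Bj[dest] = Aj[n];
        Bx[dest] = Ax[n];
      }
    }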
|
/external/eigen/Eigen/src/OrderingMethods/ |
D | Ordering.h |
    134  StorageIndex nnz = StorageIndex(mat.nonZeros());   in operator() local
    136  StorageIndex Alen = internal::colamd_recommended(nnz, m, n);   in operator()
    144  for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];   in operator()
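COLAMDOrdering copies the matrix pattern (its nnz inner indices plus the outer pointers) into a workspace sized by colamd_recommended and runs COLAMD on it to obtain a fill-reducing column permutation. In user code it usually appears only as the ordering parameter of a sparse factorization; a usage sketch with SparseQR, assuming A is already in compressed storage:

    #include <Eigen/SparseCore>
    #include <Eigen/SparseQR>

    // Solve A x = b with a COLAMD fill-reducing column ordering.
    Eigen::VectorXd SolveWithColamd(const Eigen::SparseMatrix<double>& A,
                                    const Eigen::VectorXd& b) {
      Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
      qr.compute(A);   // analyzePattern (uses the ordering) + factorize
      return qr.solve(b);
    }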
|
D | Eigen_Colamd.h |
    257  inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)   in colamd_recommended() argument
    259  if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)   in colamd_recommended()
    262  return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5));   in colamd_recommended()
    327  IndexType nnz ; /* nonzeros in A */   in colamd() local
    385  nnz = p [n_col] ;   in colamd()
    386  if (nnz < 0) /* nnz must be >= 0 */   in colamd()
    389  stats [COLAMD_INFO1] = nnz ;   in colamd()
    390  COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;   in colamd()
    414  need = 2*nnz + n_col + Col_size + Row_size ;   in colamd()
    447  n_col2, max_deg, 2*nnz) ;   in colamd()
|
/external/eigen/Eigen/src/IterativeLinearSolvers/ |
D | IncompleteCholesky.h |
    217  Index nnz = m_L.nonZeros();   in factorize() local
    218  Map<VectorSx> vals(m_L.valuePtr(), nnz); //values   in factorize()
    219  Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices   in factorize()
    329  vals = Map<const VectorSx>(L_save.valuePtr(), nnz);   in factorize()
    330  rowIdx = Map<const VectorIx>(L_save.innerIndexPtr(), nnz);   in factorize()
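factorize() works in place on the nnz stored entries of m_L, mapping its value and inner-index arrays as flat vectors so a failed attempt can be retried with a larger shift from the saved copy. From the outside the class is normally used as a preconditioner; a usage sketch with conjugate gradient, assuming the default template arguments and a symmetric positive-definite matrix:

    #include <Eigen/IterativeLinearSolvers>
    #include <Eigen/SparseCore>

    // CG on an SPD sparse matrix, preconditioned with an incomplete Cholesky
    // factorization of A.
    Eigen::VectorXd SolveSpd(const Eigen::SparseMatrix<double>& A,
                             const Eigen::VectorXd& b) {
      Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower,
                               Eigen::IncompleteCholesky<double> > cg;
      cg.compute(A);
      return cg.solve(b);
    }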
|
/external/libavc/encoder/ |
D | ih264e_cavlc.c |
    1233  UWORD32 *nnz;   in ih264e_write_pslice_mb_cavlc() local
    1246  nnz = (UWORD32 *)ps_ent_ctxt->pu1_top_nnz_luma[ps_ent_ctxt->i4_mb_x];   in ih264e_write_pslice_mb_cavlc()
    1247  *nnz = 0;   in ih264e_write_pslice_mb_cavlc()
    1249  nnz = (UWORD32 *)ps_ent_ctxt->pu1_top_nnz_cbcr[ps_ent_ctxt->i4_mb_x];   in ih264e_write_pslice_mb_cavlc()
    1250  *nnz = 0;   in ih264e_write_pslice_mb_cavlc()
    1544  UWORD32 *nnz;   in ih264e_write_bslice_mb_cavlc() local
    1557  nnz = (UWORD32 *)ps_ent_ctxt->pu1_top_nnz_luma[ps_ent_ctxt->i4_mb_x];   in ih264e_write_bslice_mb_cavlc()
    1558  *nnz = 0;   in ih264e_write_bslice_mb_cavlc()
    1560  nnz = (UWORD32 *)ps_ent_ctxt->pu1_top_nnz_cbcr[ps_ent_ctxt->i4_mb_x];   in ih264e_write_bslice_mb_cavlc()
    1561  *nnz = 0;   in ih264e_write_bslice_mb_cavlc()
|
/external/tensorflow/tensorflow/core/util/sparse/ |
D | README.md |
    180  Expected input shapes, orders, and `nnz()`:
    213  EXPECT_EQ(conc.nnz(), st1.nnz() + st2.nnz() + st3.nnz());
|
/external/eigen/doc/ |
D | SparseQuickReference.dox |
    25   sm1.reserve(nnz); // Allocate room for nnz nonzeros elements.
    28   <td> Note that when calling reserve(), it is not required that nnz is the exact number of nonzero e…
    261  int innerIndices[nnz];
    262  double values[nnz];
    263  Map<SparseMatrix<double> > sm1(rows,cols,nnz,outerIndexPtr, // read-write
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | sparse_tensor_dense_matmul_grad_test.py |
    65   sp_t, nnz = self._randomTensor(
    78   sp_t_val_shape = [nnz]
|