/external/u-boot/drivers/fastboot/ |
D | fb_nand.c |
    108  struct fb_nand_sparse *sparse = info->priv;  in fb_nand_sparse_write() local
    112  ret = _fb_nand_write(sparse->mtd, sparse->part, (void *)buffer,  in fb_nand_sparse_write()
    191  struct sparse_storage sparse;  in fastboot_nand_flash_write() local
    196  sparse.blksz = mtd->writesize;  in fastboot_nand_flash_write()
    197  sparse.start = part->offset / sparse.blksz;  in fastboot_nand_flash_write()
    198  sparse.size = part->size / sparse.blksz;  in fastboot_nand_flash_write()
    199  sparse.write = fb_nand_sparse_write;  in fastboot_nand_flash_write()
    200  sparse.reserve = fb_nand_sparse_reserve;  in fastboot_nand_flash_write()
    201  sparse.mssg = fastboot_fail;  in fastboot_nand_flash_write()
    204  sparse.start);  in fastboot_nand_flash_write()
    [all …]
|
D | fb_mmc.c |
    87   struct fb_mmc_sparse *sparse = info->priv;  in fb_mmc_sparse_write() local
    88   struct blk_desc *dev_desc = sparse->dev_desc;  in fb_mmc_sparse_write()
    405  struct sparse_storage sparse;  in fastboot_mmc_flash_write() local
    410  sparse.blksz = info.blksz;  in fastboot_mmc_flash_write()
    411  sparse.start = info.start;  in fastboot_mmc_flash_write()
    412  sparse.size = info.size;  in fastboot_mmc_flash_write()
    413  sparse.write = fb_mmc_sparse_write;  in fastboot_mmc_flash_write()
    414  sparse.reserve = fb_mmc_sparse_reserve;  in fastboot_mmc_flash_write()
    415  sparse.mssg = fastboot_fail;  in fastboot_mmc_flash_write()
    418  sparse.start);  in fastboot_mmc_flash_write()
    [all …]
|
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/ |
D | bucket_by_sequence_length_test.py |
    46   def _format_record(array, sparse):  argument
    47   if sparse:
    56   def _get_record_type(sparse):  argument
    57   if sparse:
    66   def _get_record_shape(sparse):  argument
    67   if sparse:
    115  def build_dataset(sparse):  argument
    128  yield (_format_record(el, sparse),)
    131  _generator, (_get_record_type(sparse),), (_get_record_shape(sparse),))
    132  if sparse:
    [all …]
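The helpers above build dense or sparse records for tf.data.experimental.bucket_by_sequence_length. A minimal sketch of the dense path of that API (my own example, not taken from the test; assumes TF 2.4+ for output_signature):

    import tensorflow as tf

    # Variable-length integer sequences to be bucketed by length.
    seqs = [[1, 2], [3, 4, 5, 6, 7], [8], [9, 10, 11]]
    ds = tf.data.Dataset.from_generator(
        lambda: iter(seqs),
        output_signature=tf.TensorSpec(shape=[None], dtype=tf.int32))

    # Elements shorter than 3 go to one bucket, the rest to another;
    # each bucket batches (and pads) 2 elements at a time.
    ds = ds.apply(tf.data.experimental.bucket_by_sequence_length(
        element_length_func=lambda x: tf.shape(x)[0],
        bucket_boundaries=[3],
        bucket_batch_sizes=[2, 2]))

    for batch in ds:
        print(batch.numpy())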
|
/external/deqp/doc/testspecs/VK/ |
D | sparse_resources.txt |
    27  The test creates two queues - one supporting sparse binding operations, the second one supporting c…
    29  First queue is used to perform binding of device memory to sparse buffer. The binding operation sig…
    32  The second queue is used to perform transfer operations. The test creates two non-sparse buffer obj…
    33  one used as input and the second as output. The input buffer is used to transfer data to sparse buf…
    34  transferred further from sparse buffer to output buffer. The transfer queue waits on a semaphore, bef…
    46  The test creates two queues - one supporting sparse binding operations, the second one supporting c…
    48  First queue is used to perform binding of device memory to sparse image. The binding operation sign…
    51  The second queue is used to perform transfer operations. The test creates two non-sparse buffer obj…
    52  one used as input and the second as output. The input buffer is used to transfer data to sparse ima…
    53  transferred further from sparse image to output buffer. The transfer queue waits on a semaphore, bef…
    [all …]
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | input_layer.py |
    57   sparse=False,  argument
    85   self.sparse = sparse
    100  if sparse:
    105  sparse=True)
    139  'sparse': self.sparse,
    151  sparse=False,  argument
    232  sparse=sparse,
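The hits above show the sparse flag threading through Keras's InputLayer. A minimal sketch of how that flag is used through the public tf.keras.Input API (my own example, assuming a recent TF 2.x where Dense layers accept sparse inputs):

    import tensorflow as tf

    # sparse=True asks Keras for a placeholder that is fed a
    # tf.sparse.SparseTensor instead of a dense tensor.
    x = tf.keras.Input(shape=(1000,), sparse=True, dtype=tf.float32)
    y = tf.keras.layers.Dense(8, activation="relu")(x)
    model = tf.keras.Model(x, y)

    sp = tf.sparse.SparseTensor(indices=[[0, 3], [1, 42]],
                                values=[1.0, 2.0],
                                dense_shape=[2, 1000])
    print(model(sp).shape)  # (2, 8)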
|
/external/tensorflow/tensorflow/core/util/ |
D | strided_slice_op.cc |
    73  const StridedSliceSparseSpec& sparse, StridedSliceDenseSpec* dense) {  in BuildDenseSpec() argument
    86  const T* const strides_flat = sparse.strides_tensor.vec<T>().data();  in BuildDenseSpec()
    87  dense->begin_valid = sparse.begin_tensor != nullptr;  in BuildDenseSpec()
    88  dense->end_valid = sparse.end_tensor != nullptr;  in BuildDenseSpec()
    90  const T* const begin_flat = sparse.begin_tensor != nullptr  in BuildDenseSpec()
    91  ? sparse.begin_tensor->vec<T>().data()  in BuildDenseSpec()
    93  const T* const end_flat = sparse.end_tensor != nullptr  in BuildDenseSpec()
    94  ? sparse.end_tensor->vec<T>().data()  in BuildDenseSpec()
    97  for (int i = 0; i < sparse.dims; i++) {  in BuildDenseSpec()
    98  if ((1 << i) & sparse.ellipsis_mask) {  in BuildDenseSpec()
    [all …]
|
/external/tensorflow/tensorflow/core/ops/ |
D | training_ops.cc |
    37   static Status HandleGradAndIndicesInputs(InferenceContext* c, bool sparse,  in HandleGradAndIndicesInputs() argument
    40   if (!sparse) {  in HandleGradAndIndicesInputs()
    88   bool sparse) {  in ApplyProximalGradientDescentShapeFn() argument
    95   HandleGradAndIndicesInputs(c, sparse, 4 /* grad_idx */, &s));  in ApplyProximalGradientDescentShapeFn()
    156  static Status ApplyAdadeltaShapeFn(InferenceContext* c, bool sparse) {  in ApplyAdadeltaShapeFn() argument
    166  HandleGradAndIndicesInputs(c, sparse, 6 /* grad_idx */, &s));  in ApplyAdadeltaShapeFn()
    235  static Status ApplyAdagradShapeFn(InferenceContext* c, bool sparse) {  in ApplyAdagradShapeFn() argument
    241  HandleGradAndIndicesInputs(c, sparse, 3 /* grad_idx */, &s));  in ApplyAdagradShapeFn()
    273  static Status ApplyProximalAdagradShapeFn(InferenceContext* c, bool sparse) {  in ApplyProximalAdagradShapeFn() argument
    281  HandleGradAndIndicesInputs(c, sparse, 5 /* grad_idx */, &s));  in ApplyProximalAdagradShapeFn()
    [all …]
|
/external/tensorflow/tensorflow/python/data/util/ |
D | sparse_test.py |
    22   from tensorflow.python.data.util import sparse
    72   sparse.any_sparse(test_case["classes"]), test_case["expected"])
    154  sparse.as_dense_shapes(test_case["types"], test_case["classes"]),
    228  sparse.as_dense_types(test_case["types"], test_case["classes"]),
    287  sparse.get_classes(test_case["classes"]), test_case["expected"])
    319  classes = sparse.get_classes(expected)
    323  actual = sparse.deserialize_sparse_tensors(
    324  sparse.serialize_sparse_tensors(expected), types, shapes,
    325  sparse.get_classes(expected))
    349  classes = sparse.get_classes(expected)
    [all …]
|
/external/tensorflow/tensorflow/python/training/ |
D | input_test.py |
    798  sparse = sparse_tensor.SparseTensor(
    800  self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
    801  batched = inp.batch([sparse], batch_size=2)
    806  sparse = sparse_tensor.SparseTensor(
    808  self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
    809  batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
    814  sparse = sparse_tensor.SparseTensor(
    818  self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
    819  batched = inp.batch([sparse], batch_size=2)
    824  sparse = sparse_tensor.SparseTensor(
    [all …]
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_SparseSlice.pbtxt |
    6   2-D tensor represents the indices of the sparse tensor.
    12  1-D tensor represents the values of the sparse tensor.
    18  1-D. tensor represents the shape of the sparse tensor.
    32  sparse tensors.
    38  A list of 1-D tensors represents the values of the output sparse
    45  A list of 1-D tensors represents the shape of the output sparse
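The op this pbtxt documents is exposed in Python as tf.sparse.slice. A small sketch of the behaviour it describes (my own example):

    import tensorflow as tf

    # 2x6 sparse tensor with nonzeros at (0,0), (0,4) and (1,5).
    sp = tf.sparse.SparseTensor(indices=[[0, 0], [0, 4], [1, 5]],
                                values=[1, 2, 3],
                                dense_shape=[2, 6])

    # Keep both rows but only columns 0..3.
    left = tf.sparse.slice(sp, start=[0, 0], size=[2, 4])
    print(tf.sparse.to_dense(left).numpy())
    # [[1 0 0 0]
    #  [0 0 0 0]]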
|
D | api_def_SparseSplit.pbtxt |
    13  2-D tensor represents the indices of the sparse tensor.
    19  1-D tensor represents the values of the sparse tensor.
    25  1-D. tensor represents the shape of the sparse tensor.
    27  sparse tensors.
    33  A list of 1-D tensors represents the values of the output sparse
    40  A list of 1-D tensors represents the shape of the output sparse
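The corresponding Python entry point is tf.sparse.split. A small sketch (my own example):

    import tensorflow as tf

    sp = tf.sparse.SparseTensor(indices=[[0, 0], [0, 4], [1, 5]],
                                values=[1, 2, 3],
                                dense_shape=[2, 6])

    # Split the 6 columns into two sparse tensors of 3 columns each.
    parts = tf.sparse.split(sp_input=sp, num_split=2, axis=1)
    for p in parts:
        print(tf.sparse.to_dense(p).numpy())
    # [[1 0 0]       [[0 2 0]
    #  [0 0 0]]  and  [0 0 3]]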
|
D | api_def_SparseFillEmptyRows.pbtxt |
    6   2-D. the indices of the sparse tensor.
    12  1-D. the values of the sparse tensor.
    18  1-D. the shape of the sparse tensor.
    25  for rows missing from the input sparse tensor.
    26  output indices: 2-D. the indices of the filled sparse tensor.
    32  1-D. the values of the filled sparse tensor.
    39  input sparse tensor.
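Exposed in Python as tf.sparse.fill_empty_rows; a small sketch of the fill-and-indicator behaviour described above (my own example):

    import tensorflow as tf

    # Row 1 has no values; it gets default_value inserted at column 0,
    # and the indicator reports which rows were empty.
    sp = tf.sparse.SparseTensor(indices=[[0, 1], [2, 0]],
                                values=[10, 20],
                                dense_shape=[3, 4])
    filled, was_empty = tf.sparse.fill_empty_rows(sp, default_value=-1)
    print(tf.sparse.to_dense(filled).numpy())
    # [[ 0 10  0  0]
    #  [-1  0  0  0]
    #  [20  0  0  0]]
    print(was_empty.numpy())  # [False  True False]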
|
D | api_def_SparseAccumulatorTakeGradient.pbtxt |
    18  Indices of the average of the accumulated sparse gradients.
    24  Values of the average of the accumulated sparse gradients.
    30  Shape of the average of the accumulated sparse gradients.
    40  summary: "Extracts the average sparse gradient in a SparseConditionalAccumulator."
|
D | api_def_SparseAccumulatorApplyGradient.pbtxt |
    12  The local_step value at which the sparse gradient was computed.
    18  Indices of the sparse gradient to be accumulated. Must be a
    33  Shape of the sparse gradient to be accumulated.
    50  summary: "Applies a sparse gradient to a given accumulator."
|
/external/eigen/doc/ |
D | TutorialSparse.dox |
    7   Manipulating and solving sparse problems involves various modules which are summarized below:
    11  …eMatrix and SparseVector classes, matrix assembly, basic sparse linear algebra (including sparse t…
    12  … <Eigen/SparseCholesky>\endcode</td><td>Direct sparse LLT and LDLT Cholesky factorization to solve…
    14  <td>%Sparse LU factorization to solve general square sparse systems</td></tr>
    15  …de<Eigen/SparseQR>\endcode </td><td>%Sparse QR factorization for solving sparse linear least-squar…
    22  …ized representation storing only the nonzero coefficients. Such a matrix is called a sparse matrix.
    26  The class SparseMatrix is the main sparse matrix representation of Eigen's sparse module; it offers…
    45  and one of its possible sparse, \b column \b major representation:
    68  The results of %Eigen's operations always produces \b compressed sparse matrices.
    97  In this example, we start by defining a column-major sparse matrix type of double \c SparseMatrix<d…
    [all …]
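The tutorial describes Eigen's compressed column-major storage for SparseMatrix. As a language-neutral illustration of the same storage scheme (not Eigen's API; a scipy sketch with made-up values):

    import numpy as np
    from scipy.sparse import csc_matrix

    dense = np.array([[ 0., 3., 0.,  0.],
                      [22., 0., 0.,  0.],
                      [ 7., 5., 0.,  1.],
                      [ 0., 0., 0., 17.]])
    m = csc_matrix(dense)
    print(m.data)     # nonzero values, stored column by column
    print(m.indices)  # row index of each stored value
    print(m.indptr)   # where each column starts within data/indices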
|
/external/mesa3d/src/gallium/winsys/amdgpu/drm/ |
D | amdgpu_bo.c |
    222  assert(!bo->sparse);  in amdgpu_bo_map()
    336  assert(!bo->sparse);  in amdgpu_bo_unmap()
    597  __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);  in sparse_dump()
    608  if (va_page < bo->u.sparse.num_va_pages) {  in sparse_dump()
    609  backing = bo->u.sparse.commitments[va_page].backing;  in sparse_dump()
    610  backing_page = bo->u.sparse.commitments[va_page].page;  in sparse_dump()
    624  if (va_page >= bo->u.sparse.num_va_pages)  in sparse_dump()
    638  list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {  in sparse_dump()
    663  list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {  in sparse_backing_alloc()
    693  assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));  in sparse_backing_alloc()
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | training_ops.cc |
    468  const bool sparse = false;  in Compute() local
    470  ctx, use_exclusive_lock_, sparse, {0});  in Compute()
    473  ctx, 0, use_exclusive_lock_, sparse, &var));  in Compute()
    510  const bool sparse = false;  in Compute() local
    512  ctx, use_exclusive_lock_, sparse, {0});  in Compute()
    515  ctx, 0, use_exclusive_lock_, sparse, &var));  in Compute()
    605  const bool sparse = false;  in Compute() local
    606  mutex* mu = GetTrainingVariableMutex<Device, T>(ctx, 0, sparse, &resource);  in Compute()
    630  const bool sparse = false;  in DoValidate() local
    632  ctx, 0, use_exclusive_lock_, sparse, &var));  in DoValidate()
    [all …]
|
D | edit_distance_op.cc |
    136  sparse::SparseTensor hypothesis;  in Compute()
    137  OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(  in Compute()
    141  sparse::SparseTensor truth;  in Compute()
    142  OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(  in Compute()
    178  sparse::Group truth_i = *truth_iter;  in Compute()
    179  sparse::Group hypothesis_j = *hypothesis_iter;  in Compute()
    210  sparse::Group hypothesis_j = *hypothesis_iter;  in Compute()
    222  sparse::Group truth_i = *truth_iter;  in Compute()
|
D | training_op_helpers.h |
    110  mutex* GetTrainingVariableMutex(OpKernelContext* ctx, int input, bool sparse,  in GetTrainingVariableMutex() argument
    115  if (sparse) {  in GetTrainingVariableMutex()
    141  OpKernelContext* ctx, bool do_lock, bool sparse,  in MaybeLockVariableInputMutexesInOrder() argument
    159  GetTrainingVariableMutex<Device, T>(ctx, input, sparse, &var);  in MaybeLockVariableInputMutexesInOrder()
    178  mutex* mu = GetTrainingVariableMutex<Device, T>(ctx, input, sparse, &var);  in MaybeLockVariableInputMutexesInOrder()
    181  if (!sparse || do_lock) {  in MaybeLockVariableInputMutexesInOrder()
    242  bool lock_held, bool sparse, Tensor* out) {  in GetInputTensorFromVariable() argument
    247  if (sparse) {  in GetInputTensorFromVariable()
|
D | sparse_reorder_op.cc |
    63  sparse::SparseTensor input_sp;  in Compute()
    65  context, sparse::SparseTensor::Create(input_ind, input_val, input_shape,  in Compute()
    73  sparse::SparseTensor reordered_sp;  in Compute()
    75  sparse::SparseTensor::Create(tensor::DeepCopy(input_ind),  in Compute()
|
/external/tensorflow/tensorflow/core/protobuf/tpu/ |
D | tpu_embedding_configuration.proto |
    63  // This parameter determines if the execution of the sparse core will be
    68  // false: The execution of the sparse core is not pipelined with that of the
    69  // TensorCore. The forward pass of every step on the sparse core is executed
    71  // backward pass on the sparse core is executed only after the embedding
    74  // previous step on both the sparse core and the TensorCore.
    76  // true: The execution of the sparse core is pipelined with that of the
    77  // TensorCore. The forward pass of every step on the sparse core can be
    79  // waiting for the backward pass. This improves the utilization of the sparse
    82  // sparse core is executed directly after the forward pass for the next step
    86  // since the embedding updates are sparse, this is generally not considered a
|
/external/tensorflow/tensorflow/contrib/boosted_trees/lib/utils/ |
D | examples_iterable_test.cc |
    47  sparse::SparseTensor sparse_float_tensor1;  in TEST_F()
    49  sparse::SparseTensor::Create(sparse_float_indices1, sparse_float_values1,  in TEST_F()
    56  sparse::SparseTensor sparse_float_tensor2;  in TEST_F()
    58  sparse::SparseTensor::Create(sparse_float_indices2, sparse_float_values2,  in TEST_F()
    64  sparse::SparseTensor sparse_int_tensor1;  in TEST_F()
    66  sparse::SparseTensor::Create(sparse_int_indices1, sparse_int_values1,  in TEST_F()
    72  sparse::SparseTensor sparse_int_tensor2;  in TEST_F()
    74  sparse::SparseTensor::Create(sparse_int_indices2, sparse_int_values2,  in TEST_F()
|
D | batch_features.cc |
    99   auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});  in Initialize()
    100  sparse::SparseTensor sparse_tensor;  in Initialize()
    101  TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(  in Initialize()
    141  auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});  in Initialize()
    142  sparse::SparseTensor sparse_tensor;  in Initialize()
    143  TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(  in Initialize()
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | sparse_add_op_test.py |
    53   def _randomTensor(self, size, np_dtype, sparse=True):  argument
    56   return _sparsify(x) if sparse else x
    168  sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype)
    170  sparse_ops.sparse_add(sparse, constant_op.constant(dense_np)))
    176  sparse_ops.sparse_add(constant_op.constant(dense_np), sparse))
    188  sparse, nnz = _sparsify(rand_vals_np)
    190  s = sparse_ops.sparse_add(sparse, dense)
    192  err = gradient_checker.compute_gradient_error([sparse.values, dense],
    207  sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
    208  s = sparse_ops.sparse_add(sparse, dense)
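These tests drive the op behind tf.sparse.add, which accepts sparse+sparse or sparse+dense operands. A minimal sketch (my own example):

    import tensorflow as tf

    sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                                values=[1.0, 2.0],
                                dense_shape=[2, 3])
    dense = tf.ones([2, 3])

    # sparse + dense returns a dense tensor ...
    print(tf.sparse.add(sp, dense).numpy())
    # [[2. 1. 1.]
    #  [1. 1. 3.]]

    # ... while sparse + sparse stays sparse.
    print(type(tf.sparse.add(sp, sp)))  # SparseTensor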
|
/external/deqp/external/openglcts/docs/specs/ |
D | CTS_ARB_sparse_texture_clamp.txt |
    50  * Iterate through all sparse supported targets.
    51  Allocate sparse texture for current <target> with committed and
    70  … that the texels will be filled with (1, 1, 1, 1) if the areas of the sparse texture accessed
    75  Sparse and Non-sparse Texture Clamp Lookup Color Tests
    77  * Iterate through all sparse and non-sparse supported targets.
|