/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_gather_ops.py
      38  batch_dims=0,  argument
     112  def _gather(params, indices, axis, batch_dims):  argument
     174  def _batch_gather(params, indices, axis, batch_dims):  argument
     331  batch_dims=0):  argument
     341  batch_dims=0,  argument
     485  batch_dims=0):  argument
|
D | ragged_gather_op_test.py
     192  batch_dims=0,  argument
     396  batch_dims=0):  argument
|
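The ragged gather ops and tests above implement the batch_dims argument of tf.gather for RaggedTensor inputs. As a minimal sketch of what batch_dims means (dense tensors here, not taken from those files): with batch_dims=1 the leading dimension of params and indices is treated as a shared batch dimension and the gather runs independently per batch element.

    import tensorflow as tf

    # batch_dims=1: dimension 0 of params and indices is a shared batch
    # dimension; each row of indices selects from the matching row of params.
    params = tf.constant([[10, 11, 12],
                          [20, 21, 22]])   # shape [2, 3]
    indices = tf.constant([[2, 0],
                           [1, 1]])        # shape [2, 2]
    result = tf.gather(params, indices, batch_dims=1)
    # result == [[12, 10], [21, 21]], shape [2, 2]
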
/external/tensorflow/tensorflow/python/kernel_tests/math_ops/ |
D | banded_triangular_solve_op_test.py
      27  def _verifySolveAllWays(self, x, y, dtypes, batch_dims=None):  argument
      40  def _verifySolveAllWaysReal(self, x, y, batch_dims=None):  argument
      43  def _verifySolveAllWaysComplex(self, x, y, batch_dims=None):  argument
      51  batch_dims=None,  argument
|
/external/tensorflow/tensorflow/python/kernel_tests/linalg/ |
D | matrix_triangular_solve_op_test.py
      27  def _verifySolveAllWays(self, x, y, dtypes, batch_dims=None):  argument
      40  def _verifySolveAllWaysReal(self, x, y, batch_dims=None):  argument
      43  def _verifySolveAllWaysComplex(self, x, y, batch_dims=None):  argument
      51  batch_dims=None,  argument
|
D | matrix_solve_op_test.py | 37 def _verifySolve(self, x, y, batch_dims=None): argument
|
D | matrix_exponential_op_test.py | 218 def _TestRandomSmall(dtype, batch_dims, size): argument
|
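In the solve and matrix-exponential tests above, batch_dims are extra leading dimensions prepended to the matrix operands; each trailing [n, n] problem is handled independently. A minimal sketch of that convention through tf.linalg.triangular_solve (illustrative only, not taken from the test files):

    import tensorflow as tf

    # batch_dims = (2, 3): six independent 4x4 lower-triangular systems.
    matrix = tf.linalg.band_part(tf.random.normal([2, 3, 4, 4]), -1, 0)
    matrix += 4.0 * tf.eye(4, batch_shape=[2, 3])   # keep the systems well-conditioned
    rhs = tf.random.normal([2, 3, 4, 1])
    x = tf.linalg.triangular_solve(matrix, rhs, lower=True)   # shape [2, 3, 4, 1]
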
/external/tensorflow/tensorflow/python/kernel_tests/array_ops/ |
D | gather_op_test.py
     476  def testBatchDims(self, params, indices, batch_dims, expected=None,  argument
     594  batch_dims, axis, output_shape):  argument
     629  def _batchNumpyGather(self, params, indices, axis, batch_dims):  argument
|
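gather_op_test.py checks the op against a NumPy reference (_batchNumpyGather). A hedged sketch of such a reference for the batch_dims=1 case (the helper name and structure below are illustrative, not the test's actual code):

    import numpy as np

    def batch_gather_reference(params, indices, axis):
      """tf.gather(..., batch_dims=1) semantics: dimension 0 is a shared batch
      dimension; gather along `axis` of params within each batch element."""
      return np.stack([np.take(p, i, axis=axis - 1)
                       for p, i in zip(params, indices)])

    params = np.arange(12).reshape(2, 2, 3)    # 2 batch elements of shape [2, 3]
    indices = np.array([[1, 0], [0, 0]])       # per-batch indices into axis 1
    out = batch_gather_reference(params, indices, axis=1)   # shape (2, 2, 3)
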
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | qr_expander.cc
     109  Status House(XlaOp x, XlaOp k, absl::Span<const int64_t> batch_dims,  in House()
     214  std::vector<int64_t> batch_dims(num_batch_dims);  in QrBlock() local
     319  PrimitiveType type, absl::Span<const int64_t> batch_dims, XlaOp vs,  in CompactWYRepresentation()
     394  std::vector<int64_t> batch_dims(num_batch_dims);  in BuildQrDecomposition() local
     465  std::vector<int64_t> batch_dims(num_batch_dims);  in ProductOfElementaryHouseholderReflectors() local
|
D | dot_as_convolution_util.h | 53 std::vector<DimNums> batch_dims; member
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | matmul_utils.cc
      51  const Shape& shape, absl::Span<const int64_t> batch_dims,  in GetNonContractingDims()
      69  absl::Span<const int64_t> batch_dims,  in GetBatchRowColumnShape()
     163  const Shape& shape, absl::Span<const int64_t> batch_dims,  in For()
     184  auto batch_dims = absl::Span<const int64_t>(dims).first(num_batch_dims);  in For() local
     216  auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()  in CanFoldTransposeOperandIntoDot() local
|
D | gpu_layout_assignment.cc
     403  absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims,  in SetDotOperandLayout()
     425  absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims,  in SetOperandBatchRowsColsLayout()
|
D | cusolver_rewriter.cc | 62 std::vector<int64_t> batch_dims(a_shape.dimensions().begin(), in CreateCholesky() local
|
D | matmul_utils_test.cc | 41 std::vector<int64_t> batch_dims; member
|
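In matmul_utils.cc, gpu_layout_assignment.cc and dot_as_convolution_util.h above, batch_dims are the dot dimensions that appear on both operands and pass straight through to the output. A minimal user-level sketch of that contraction (illustrative, not the GPU code path itself):

    import tensorflow as tf

    a = tf.random.normal([8, 4, 5])        # dim 0 is the batch dimension
    b = tf.random.normal([8, 5, 6])        # batch dims must match on both operands
    c = tf.linalg.matmul(a, b)             # shape [8, 4, 6]
    c2 = tf.einsum('bij,bjk->bik', a, b)   # same dot, batch dim 'b' written out
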
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | lu_decomposition.cc | 35 const std::vector<int64_t> batch_dims( in LuDecomposition() local
|
D | self_adjoint_eig.cc | 70 const std::vector<int64_t> batch_dims( in SelfAdjointEig() local
|
D | svd.cc
     120  std::vector<int64_t> batch_dims(num_batch_dims);  in HouseRow() local
     185  std::vector<int64_t> batch_dims(num_batch_dims);  in HouseCol() local
     259  std::vector<int64_t> batch_dims(num_batch_dims);  in HouseHolderBidiagonalization() local
     461  std::vector<int64_t> batch_dims(num_batch_dims);  in OneSidedJacobiUpdate() local
     840  std::vector<int64_t> batch_dims(num_batch_dims);  in SVD() local
|
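The XLA client-library decompositions above (LU, self-adjoint eigendecomposition, SVD, and the QR expander earlier) all treat every leading dimension of the input as a batch dimension. A hedged sketch of how that surfaces at the tf.linalg level (illustrative; these routines are reached when the ops are compiled with XLA):

    import tensorflow as tf

    a = tf.random.normal([3, 2, 5, 4])   # batch_dims = (3, 2)
    s, u, v = tf.linalg.svd(a)           # s: [3, 2, 4], u: [3, 2, 5, 4], v: [3, 2, 4, 4]
    q, r = tf.linalg.qr(a)               # q: [3, 2, 5, 4], r: [3, 2, 4, 4]
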
/external/tensorflow/tensorflow/compiler/xla/service/spmd/ |
D | gather_scatter_handler.cc
     170  PartitionedHlo& indices, absl::Span<const int64_t> batch_dims,  in PartitionGatherIndexPassthroughPartition()
     272  const HloSharding& output_sharding, absl::Span<const int64_t> batch_dims,  in PartitionGatherPassthroughOperand()
     376  const HloSharding& output_sharding, absl::Span<const int64_t> batch_dims,  in PartitionGatherTrivialIndexedOperandDimension()
     448  std::vector<int64_t> batch_dims;  in PartitionGatherTrivialIndexedOperandDimension() local
     487  const HloSharding& output_sharding, absl::Span<const int64_t> batch_dims,  in PartitionGatherIndexParallelDimensions()
     640  const HloSharding& output_sharding, absl::Span<const int64_t> batch_dims,  in PartitionGather()
     690  std::vector<int64_t> batch_dims;  in HandleGather() local
|
/external/tensorflow/tensorflow/dtensor/mlir/expansions/ |
D | gather_spmd_expander.cc
      54  int batch_dims = gather_op.batch_dims();  in ExpandOp() local
     174  int batch_dims = gather_op.batch_dims();  in ComputeLayoutForward() local
     253  int batch_dims = gather_op.batch_dims();  in ComputeLayoutBackward() local
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | matrix_solve_op_test.py | 54 def testSolve(self, n, nrhs, batch_dims, rhs_batch_dims, adjoint): argument
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | gather.h | 38 int batch_dims = op_params.batch_dims; in Gather() local
|
/external/tensorflow/tensorflow/python/ops/structured/ |
D | structured_array_ops_test.py
    1113  def testGather(self, params, indices, axis, batch_dims, expected):  argument
    1150  def testGatherRagged(self, params, indices, axis, batch_dims, expected):  argument
    1200  indices, axis, batch_dims,  argument
|
/external/tensorflow/tensorflow/core/ops/ |
D | sparse_csr_matrix_ops.cc
     279  ShapeHandle batch_dims;  in __anon6adce4610702() local
     432  ShapeHandle batch_dims;  in __anon6adce4610a02() local
|
/external/tensorflow/tensorflow/core/kernels/ |
D | gather_op.cc | 92 int32_t batch_dims = batch_dims_; in Compute() local
|
D | gather_op_test.cc | 43 void MakeOp(DataType data_type, DataType index_type, int batch_dims = 0) { in MakeOp()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | gather.cc | 90 int batch_dims = params->batch_dims; in Prepare() local
|
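The TFLite GATHER kernel above reads batch_dims from its op parameters. A hedged sketch of producing such a model from the Python side, assuming the converter in use supports batch_dims on GATHER (the function and shapes below are illustrative):

    import tensorflow as tf

    @tf.function(input_signature=[
        tf.TensorSpec([2, 3], tf.float32),
        tf.TensorSpec([2, 2], tf.int32),
    ])
    def batched_gather(params, indices):
      # Lowered to the TFLite GATHER op; its options carry batch_dims=1.
      return tf.gather(params, indices, batch_dims=1)

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [batched_gather.get_concrete_function()])
    tflite_model = converter.convert()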