/external/tensorflow/tensorflow/contrib/factorization/python/kernel_tests/ |
D | masked_matmul_benchmark.py |
     62  def _run_graph(self, a_shape, b_shape, nnz, num_iters, sort=False,  argument
     82  mask_shape = [a_shape[0], b_shape[1]]
     83  a_shape = a_shape if not transpose_a else [a_shape[1], a_shape[0]]
     85  a_var = variables.Variable(random_ops.random_normal(a_shape))
     88  a_ph = array_ops.placeholder(dtypes.float32, shape=a_shape)
    114  a_shape=a_shape,
    137  a_shape = [10000, dim]
    139  self._run_graph(a_shape, b_shape, nnz, num_iters, sort, transpose_a,
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | cholesky_expander.cc |
     57  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in CholeskyUnblocked()
     58  const int n_dims = a_shape.rank();  in CholeskyUnblocked()
     59  const int64 n = ShapeUtil::GetDimension(a_shape, -1);  in CholeskyUnblocked()
     60  auto major_dims = AsInt64Slice(a_shape.dimensions())  in CholeskyUnblocked()
     77  ShapeUtil::MakeShape(a_shape.element_type(), row_shape_dims));  in CholeskyUnblocked()
     83  ShapeUtil::MakeShape(a_shape.element_type(), col_shape_dims));  in CholeskyUnblocked()
    142  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in BuildCholesky()
    143  const int ndims = a_shape.rank();  in BuildCholesky()
    147  a_shape.ToString());  in BuildCholesky()
    150  const int64 n = ShapeUtil::GetDimension(a_shape, -1);  in BuildCholesky()
    [all …]
|
D | triangular_solve_expander.cc |
    262  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in SolveWithInvertedDiagonalBlocks()
    263  int64 ndims = a_shape.rank();  in SolveWithInvertedDiagonalBlocks()
    264  int64 n = ShapeUtil::GetDimension(a_shape, -1);  in SolveWithInvertedDiagonalBlocks()
    356  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in BuildTriangularSolve()
    358  if (a_shape.rank() != b_shape.rank()) {  in BuildTriangularSolve()
    362  ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));  in BuildTriangularSolve()
    364  const int64 ndims = a_shape.rank();  in BuildTriangularSolve()
    373  int64 a_size = a_shape.dimensions(i);  in BuildTriangularSolve()
    379  ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));  in BuildTriangularSolve()
    384  if (ShapeUtil::GetDimension(a_shape, -1) !=  in BuildTriangularSolve()
    [all …]
|
D | hlo_cost_analysis.cc |
    555  const Shape& a_shape = hlo->operand(0)->shape();  in HandleTriangularSolve()  local
    558  int64 elems = a_shape.dimensions(a_shape.dimensions_size() - 1);  in HandleTriangularSolve()
    568  const Shape& a_shape = hlo->operand(0)->shape();  in HandleCholesky()  local
    570  int64 elems = a_shape.dimensions(a_shape.dimensions_size() - 1);  in HandleCholesky()
    571  elems *= ShapeUtil::ElementsIn(a_shape);  in HandleCholesky()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | betainc_op.cc |
     47  const TensorShape& a_shape = a.shape();  in Compute()  local
     50  if (a_shape.dims() > 0 && b_shape.dims() > 0) {  in Compute()
     51  OP_REQUIRES(ctx, a_shape == b_shape,  in Compute()
     54  a_shape.DebugString(), " vs. ", b_shape.DebugString()));  in Compute()
     56  if (a_shape.dims() > 0 && x_shape.dims() > 0) {  in Compute()
     57  OP_REQUIRES(ctx, a_shape == x_shape,  in Compute()
     60  a_shape.DebugString(), " vs. ", x_shape.DebugString()));  in Compute()
     69  TensorShape merged_shape(a_shape);  in Compute()
     76  if (a_shape == b_shape && a_shape == x_shape) {  in Compute()
     84  BCast a_shaper(BCast::FromShape(a_shape), merged_shape_vec);  in Compute()
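
The checks above require a, b, and x to agree in shape whenever they are non-scalar. A minimal usage sketch of the public op this kernel backs (tf.math.betainc; the example values are made up):

    import tensorflow as tf

    a = tf.constant([0.5, 2.0, 3.0])
    b = tf.constant([1.5, 2.0, 0.5])
    x = tf.constant([0.1, 0.5, 0.9])
    # All three operands share shape [3], so the a_shape == b_shape and
    # a_shape == x_shape checks in Compute() pass.
    print(tf.math.betainc(a, b, x))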
|
D | sparse_add_op.cc |
     32  const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape,  in Compute()  local
     65  OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape));  in Compute()
     68  TensorShapeUtils::IsVector(a_shape->shape()) &&  in Compute()
     72  a_shape->shape().DebugString(), " and ",  in Compute()
     75  ctx, a_shape->IsSameSize(*b_shape),  in Compute()
     78  a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10)));  in Compute()
     79  const auto a_shape_flat = a_shape->flat<int64>();  in Compute()
     81  for (int i = 0; i < a_shape->NumElements(); ++i) {  in Compute()
    102  const int num_dims = a_shape->dim_size(0);  in Compute()
    163  ctx->set_output(2, *a_shape);  in Compute()
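
For context, a hedged sketch of how this kernel is typically reached from Python: both operands are tf.sparse.SparseTensor values whose dense_shape vectors (the "a_shape"/"b_shape" inputs checked above) must be equal.

    import tensorflow as tf

    a = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                               dense_shape=[2, 3])
    b = tf.sparse.SparseTensor(indices=[[0, 0], [1, 1]], values=[3.0, 4.0],
                               dense_shape=[2, 3])  # same dense_shape as a
    print(tf.sparse.to_dense(tf.sparse.add(a, b)))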
|
D | sparse_tensor_dense_add_op.cc |
     36  const Tensor *a_shape, const Tensor *b) {  in ValidateInputs()  argument
     43  !TensorShapeUtils::IsVector(a_shape->shape())) {  in ValidateInputs()
     48  a_shape->shape().DebugString());  in ValidateInputs()
     50  if (a_shape->NumElements() != b->dims()) {  in ValidateInputs()
     52  "Two operands have different ranks; received: ", a_shape->NumElements(),  in ValidateInputs()
     55  const auto a_shape_flat = a_shape->flat<Index>();  in ValidateInputs()
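
ValidateInputs() insists that the sparse operand's dense_shape ("a_shape") is a vector with one entry per dimension of the dense operand b. An illustrative sketch, assuming tf.sparse.add routes the sparse-plus-dense case through this kernel:

    import tensorflow as tf

    sp = tf.sparse.SparseTensor(indices=[[0, 1], [1, 0]], values=[10.0, 20.0],
                                dense_shape=[2, 3])  # a_shape has 2 entries
    dense = tf.zeros([2, 3])                         # b has 2 dims, ranks match
    print(tf.sparse.add(sp, dense))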
|
D | sparse_tensor_dense_matmul_op_test.cc |
     27  Node* a_shape, Node* b, bool adjoint_a,  in SparseTensorDenseMatMulNode()  argument
     33  .Input(a_shape)  in SparseTensorDenseMatMulNode()
     47  Tensor a_shape(DT_INT64, TensorShape({2}));  in SparseTensorDenseMatmul()  local
     48  auto a_shape_t = a_shape.vec<int64>();  in SparseTensorDenseMatmul()
     66  test::graph::Constant(g, a_values), test::graph::HostConstant(g, a_shape),  in SparseTensorDenseMatmul()
|
D | sparse_tensor_dense_matmul_op.cc |
     44  const Tensor* a_shape;  in Compute()  local
     48  OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape));  in Compute()
     55  OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()),  in Compute()
     59  ctx, a_shape->NumElements() == 2,  in Compute()
     74  ctx, a_indices->shape().dim_size(1) == a_shape->NumElements(),  in Compute()
     78  auto a_shape_t = a_shape->vec<int64>();  in Compute()
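
The kernel requires a_shape to be a length-2 vector, i.e. the sparse operand must be a rank-2 matrix. A small sketch of the corresponding public call (tf.sparse.sparse_dense_matmul):

    import tensorflow as tf

    sp_a = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                                  dense_shape=[2, 3])  # a_shape == [2, 3]
    b = tf.ones([3, 4])
    print(tf.sparse.sparse_dense_matmul(sp_a, b))       # result shape [2, 4]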
|
D | sparse_sparse_binary_op_shared.cc |
    174  const auto a_shape = a_shape_t->flat<int64>();  in Compute()  local
    177  OP_REQUIRES(ctx, a_shape(i) == b_shape(i),  in Compute()
    179  a_shape(i), " and ", b_shape(i),  in Compute()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | cusolver_rewriter.cc |
     57  Shape a_shape = operand->shape();  in CreateCholesky()  local
     58  int ndim = a_shape.dimensions_size();  in CreateCholesky()
     60  int64 n = a_shape.dimensions(ndim - 1);  in CreateCholesky()
     62  int64 batch_size = std::accumulate(a_shape.dimensions().begin(),  in CreateCholesky()
     63  a_shape.dimensions().end() - 2, int64{1},  in CreateCholesky()
     70  switch (a_shape.element_type()) {  in CreateCholesky()
    101  a_shape.ToString());  in CreateCholesky()
    109  SetFortranLayout(&a_shape);  in CreateCholesky()
    118  {a_shape,  in CreateCholesky()
    124  call_shape, {operand}, kCusolverCholeskyCallTarget, {a_shape}));  in CreateCholesky()
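
Lines 62-63 fold every dimension of a_shape except the trailing two into a single batch size. A rough Python analogue (tf.linalg.cholesky is assumed here to be the user-facing op that reaches this rewriter on GPU):

    import numpy as np
    import tensorflow as tf

    a_shape = [4, 5, 3, 3]                    # batched 3x3 matrices
    batch_size = int(np.prod(a_shape[:-2]))   # 20, like the std::accumulate
    spd = 2.0 * tf.eye(3, batch_shape=a_shape[:-2])   # symmetric positive definite
    print(batch_size, tf.linalg.cholesky(spd).shape)  # output keeps a_shape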
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | qr.cc |
    156  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in QRBlock()
    157  const int num_dims = a_shape.rank();  in QRBlock()
    160  a_shape.ToString());  in QRBlock()
    162  PrimitiveType type = a_shape.element_type();  in QRBlock()
    164  const int64 m = ShapeUtil::GetDimension(a_shape, -2);  in QRBlock()
    165  const int64 n = ShapeUtil::GetDimension(a_shape, -1);  in QRBlock()
    170  batch_dims[i] = ShapeUtil::GetDimension(a_shape, i);  in QRBlock()
    327  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in QRDecomposition()
    328  const int num_dims = a_shape.rank();  in QRDecomposition()
    331  a_shape.ToString());  in QRDecomposition()
    [all …]
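
QRBlock() reads m and n from the trailing two dimensions of a_shape and treats everything before them as batch dimensions. An illustrative sketch via the public tf.linalg.qr:

    import tensorflow as tf

    a = tf.random.normal([2, 5, 3])   # batch of two 5x3 matrices (m=5, n=3)
    q, r = tf.linalg.qr(a)
    print(q.shape, r.shape)           # (2, 5, 3) and (2, 3, 3)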
|
D | svd.cc |
    117  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in HouseRow()
    118  const int64 num_dims = a_shape.rank();  in HouseRow()
    119  const int64 n = ShapeUtil::GetDimension(a_shape, -1);  in HouseRow()
    126  batch_dims[k] = ShapeUtil::GetDimension(a_shape, k);  in HouseRow()
    183  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in HouseCol()
    184  const int64 num_dims = a_shape.rank();  in HouseCol()
    185  const int64 m = ShapeUtil::GetDimension(a_shape, -2);  in HouseCol()
    192  batch_dims[k] = ShapeUtil::GetDimension(a_shape, k);  in HouseCol()
    261  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in HouseHolderBidiagonalization()
    262  const int64 num_dims = a_shape.rank();  in HouseHolderBidiagonalization()
    [all …]
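
The same convention holds here: m and n come from the last two dimensions of a_shape, and the rest are batch dimensions. A hedged sketch using tf.linalg.svd (assumed to be the op this code serves):

    import tensorflow as tf

    a = tf.random.normal([4, 6, 3])   # m=6, n=3, batch of 4
    s, u, v = tf.linalg.svd(a)
    print(s.shape, u.shape, v.shape)  # (4, 3), (4, 6, 3), (4, 3, 3)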
|
D | self_adjoint_eig.cc |
     80  TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));  in SymmetricShurDecomposition2x2()
    406  Shape a_shape = shape_with_status.ValueOrDie();  in SelfAdjointEig()  local
    407  const int64 num_dims = a_shape.rank();  in SelfAdjointEig()
    411  a_shape.ToString()));  in SelfAdjointEig()
    413  PrimitiveType type = a_shape.element_type();  in SelfAdjointEig()
    416  "Type of the input matrix must be float: got %s.", a_shape.ToString()));  in SelfAdjointEig()
    419  const int64 m = ShapeUtil::GetDimension(a_shape, -2);  in SelfAdjointEig()
    420  const int64 n = ShapeUtil::GetDimension(a_shape, -1);  in SelfAdjointEig()
    432  batch_dims[i] = ShapeUtil::GetDimension(a_shape, i);  in SelfAdjointEig()
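
SelfAdjointEig() rejects non-floating-point inputs and requires m == n for each batch element. A minimal sketch, assuming tf.linalg.eigh is the public counterpart:

    import tensorflow as tf

    x = tf.random.normal([3, 4, 4])
    sym = 0.5 * (x + tf.linalg.matrix_transpose(x))  # square and symmetric
    vals, vecs = tf.linalg.eigh(sym)
    print(vals.shape, vecs.shape)                    # (3, 4) and (3, 4, 4)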
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | matmul_op.cc |
     53  const TensorShape a_shape = ctx->InputShape(0);  in Compile()  local
     58  ctx, TensorShapeUtils::IsMatrix(a_shape),  in Compile()
     60  a_shape.DebugString()));  in Compile()
     69  a_shape.dim_size(first_index) == b_shape.dim_size(second_index),  in Compile()
     71  a_shape.DebugString(), ", In[1]: ",  in Compile()
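
Compile() checks that both inputs are matrices and that the contracted dimensions match once the transpose flags are applied. The equivalent constraint at the Python level (illustrative only):

    import tensorflow as tf

    a = tf.random.normal([5, 2])
    b = tf.random.normal([5, 3])
    # With transpose_a=True the contracted dimensions are 5 and 5, so this is valid.
    print(tf.linalg.matmul(a, b, transpose_a=True).shape)  # (2, 3)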
|
/external/tensorflow/tensorflow/core/ops/ |
D | sparse_ops.cc |
     76  ShapeHandle a_shape;  in __anona7ffb82a0302()  local
     77  TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &a_shape));  in __anona7ffb82a0302()
     79  0, c->Matrix(InferenceContext::kUnknownDim, c->Dim(a_shape, 0)));  in __anona7ffb82a0302()
     81  c->set_output(2, a_shape);  in __anona7ffb82a0302()
     99  ShapeHandle a_shape;  in __anona7ffb82a0402()  local
    102  TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &a_shape));  in __anona7ffb82a0402()
    103  TF_RETURN_IF_ERROR(c->WithRank(a_shape, 2, &a_shape));  in __anona7ffb82a0402()
    112  DimensionHandle output_left = c->Dim(a_shape, adjoint_a ? 1 : 0);  in __anona7ffb82a0402()
    113  DimensionHandle inner_left = c->Dim(a_shape, adjoint_a ? 0 : 1);  in __anona7ffb82a0402()
|
D | math_ops.cc |
    128  ShapeHandle a_shape;  in __anonb22bfa860202()  local
    130  TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &a_shape));  in __anonb22bfa860202()
    138  DimensionHandle output_rows = c->Dim(a_shape, adj_x ? -1 : -2);  in __anonb22bfa860202()
    145  TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));  in __anonb22bfa860202()
    151  TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, adj_x ? -2 : -1),  in __anonb22bfa860202()
   1459  ShapeHandle a_shape;  in __anonb22bfa860c02()  local
   1462  TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &a_shape));  in __anonb22bfa860c02()
   1466  TF_RETURN_IF_ERROR(c->Merge(a_shape, b_shape, &a_shape));  in __anonb22bfa860c02()
   1469  if (c->RankKnown(a_shape)) {  in __anonb22bfa860c02()
   1470  int rank = c->Rank(a_shape);  in __anonb22bfa860c02()
    [all …]
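
The first shape function above (apparently batch matmul's) takes output rows and columns from a_shape and b_shape according to the adjoint flags and merges the leading batch dimensions. A rough illustration:

    import tensorflow as tf

    a = tf.random.normal([2, 3, 4])
    b = tf.random.normal([2, 5, 4])
    # adjoint_b=True contracts the size-4 axes; the batch dims [2] are merged.
    print(tf.linalg.matmul(a, b, adjoint_b=True).shape)  # (2, 3, 5)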
|
/external/tensorflow/tensorflow/python/ops/ |
D | linalg_grad.py |
    355  a_shape = a.get_shape().with_rank_at_least(2)
    361  grad_a.set_shape(a_shape)
    373  m = a_shape.dims[-2].merge_with(grad_u_shape[-2])
    374  n = a_shape.dims[-1].merge_with(grad_v_shape[-2])
    375  batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with(
    377  a_shape = batch_shape.concatenate([m, n])
    379  m = a_shape.dims[-2].value
    380  n = a_shape.dims[-1].value
    466  grad_a.set_shape(a_shape)
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_SparseTensorDenseAdd.pbtxt |
     16  name: "a_shape"
     24  `ndims`-D Tensor. With shape `a_shape`.
|
D | api_def_SparseSparseMinimum.pbtxt |
     17  name: "a_shape"
     37  counterpart to `a_shape` for the other operand; the two shapes must be equal.
|
D | api_def_SparseSparseMaximum.pbtxt |
     17  name: "a_shape"
     37  counterpart to `a_shape` for the other operand; the two shapes must be equal.
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | matrix_triangular_solve_op_test.py |
    101  for dtype, (a_shape, b_shape) in tuples:
    102  n = a_shape[-1]
    103  a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
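
Line 103 builds a well-conditioned lower-triangular matrix (small off-diagonal entries plus the identity). A self-contained sketch of the same setup solved with tf.linalg.triangular_solve; the shapes here are chosen only for illustration:

    import numpy as np
    import tensorflow as tf

    rng = np.random.RandomState(0)
    a_shape, b_shape = (4, 4), (4, 2)
    n = a_shape[-1]
    a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
    b = rng.rand(*b_shape)
    x = tf.linalg.triangular_solve(a, b, lower=True)
    print(np.allclose(a @ x.numpy(), b))   # True up to float tolerance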
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | tensordot_op_test.py |
    146  a_shape = np.random.random_integers(1, _MAXDIM, rank_a_)
    152  a_shape[a_dims[i]] = shared_shape[i]
    156  size=np.prod(a_shape)).reshape(a_shape).astype(dtype_)
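
The test forces the contracted axes of the two operands to share sizes. The same invariant, shown directly with fixed shapes (illustrative, not the test's own code):

    import numpy as np
    import tensorflow as tf

    a = np.random.rand(2, 3, 4)
    b = np.random.rand(3, 4, 5)
    # Axes 1 and 2 of a match axes 0 and 1 of b (sizes 3 and 4), so they can
    # be contracted; the result has shape (2, 5).
    print(tf.tensordot(a, b, axes=[[1, 2], [0, 1]]).shape)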
|
D | batch_matmul_op_test.py |
    109  def CompareNonEmpty(self, a_shape, b_shape):  argument
    111  self._rand(a_shape, dtype),
    126  def CompareEmpty(self, a_shape, b_shape):  argument
    128  np.zeros(a_shape).astype(dtype),
|
/external/tensorflow/tensorflow/python/framework/ |
D | common_shapes.py |
    106  a_shape = op.inputs[0].get_shape().with_rank(2)
    110  output_rows = a_shape[1] if transpose_a else a_shape[0]
    112  inner_a = a_shape[0] if transpose_a else a_shape[1]
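
Lines 110 and 112 pick the output row count and the contracted size of the first operand depending on transpose_a. A plain-Python restatement (a hypothetical helper, not part of the module):

    def matmul_a_dims(a_shape, transpose_a):
        """Return (output_rows, inner_a) for a rank-2 a_shape."""
        output_rows = a_shape[1] if transpose_a else a_shape[0]
        inner_a = a_shape[0] if transpose_a else a_shape[1]
        return output_rows, inner_a

    print(matmul_a_dims((2, 3), transpose_a=False))  # (2, 3)
    print(matmul_a_dims((2, 3), transpose_a=True))   # (3, 2)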
|