/external/tensorflow/tensorflow/core/kernels/

reshape_util.cc
    45: const int64 output_rank = output_shape.dims();    [in operator()(), local]
    55: gtl::InlinedVector<int64, 8> output_strides(output_rank);    [in operator()()]
    56: if (output_rank > 0) {    [in operator()()]
    57: output_strides[output_rank - 1] = 1;    [in operator()()]
    58: for (int d = output_rank - 2; d >= 0; --d) {    [in operator()()]
    69: for (int j = 0; j < output_rank; ++j) {    [in operator()()]
    99: const int64 output_rank = target_shape_in.NumElements();    [in ReshapeSparseTensor(), local]
    110: for (int d = 0; d < output_rank; ++d) {    [in ReshapeSparseTensor()]
    162: TensorShape({output_rank}),    [in ReshapeSparseTensor()]
    172: TensorShape({nnz, output_rank}),    [in ReshapeSparseTensor()]

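The reshape_util.cc hits show the kernel building row-major strides for the new shape so that each sparse index can be flattened and re-expanded. A minimal Python sketch of that idea, assuming plain row-major layout (the helper names are illustrative, not the TensorFlow API):

    def row_major_strides(shape):
        # strides[-1] = 1; each earlier stride is the product of the later dims.
        strides = [1] * len(shape)
        for d in range(len(shape) - 2, -1, -1):
            strides[d] = strides[d + 1] * shape[d + 1]
        return strides

    def reshape_sparse_index(index, input_shape, output_shape):
        # Flatten the N-D index with the input strides, then re-expand it with
        # the output strides (same element order as a dense reshape would give).
        in_strides = row_major_strides(input_shape)
        out_strides = row_major_strides(output_shape)
        linear = sum(i * s for i, s in zip(index, in_strides))
        return [(linear // s) % d for s, d in zip(out_strides, output_shape)]

    # reshape_sparse_index([1, 2], [3, 4], [2, 6]) -> [1, 0]
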
deserialize_sparse_variant_op.cc
    155: const int output_rank = output_shape->NumElements();    [in Compute(), local]
    158: 0, {static_cast<int64>(total_non_zeros), output_rank},    [in Compute()]
    177: i, output_rank - input_dims_to_stack,    [in Compute()]
    205: if (input_dims_to_stack == 1 && output_rank == 2) {    [in Compute()]
    239: for (size_t k = input_dims_to_stack; k < output_rank; ++k) {    [in Compute()]

/external/tensorflow/tensorflow/python/keras/layers/

multi_head_attention.py
    122: def _get_output_shape(output_rank, known_last_dims):    [argument]
    123: return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
    348: einsum_equation, bias_axes, output_rank = _build_proj_equation(
    352: output_shape=_get_output_shape(output_rank - 1,
    357: einsum_equation, bias_axes, output_rank = _build_proj_equation(
    361: output_shape=_get_output_shape(output_rank - 1,
    366: einsum_equation, bias_axes, output_rank = _build_proj_equation(
    370: output_shape=_get_output_shape(output_rank - 1,
    379: self._build_attention(output_rank)
    387: einsum_equation, bias_axes, output_rank = _build_proj_equation(
    [all …]

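The _get_output_shape helper on lines 122-123 pads the known trailing dimensions with None up to the requested rank, which is how the projection layers describe their per-example output shape to EinsumDense. Reproduced as a standalone snippet with a small example:

    def _get_output_shape(output_rank, known_last_dims):
        # Leading dimensions are unknown (None); only the trailing dims are fixed.
        return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)

    # _get_output_shape(4, [8, 64]) -> [None, None, 8, 64]
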
convolutional.py
    254: output_rank = outputs.shape.rank
    261: if output_rank is not None and output_rank > 2 + self.rank:

/external/tensorflow/tensorflow/core/ops/

set_ops.cc
    53: DimensionHandle output_rank;    [in __anonda6063e60102(), local]
    74: output_rank = c->MakeDim(input0_rank);    [in __anonda6063e60102()]
    79: output_rank = c->MakeDim(c->Rank(input1_shape));    [in __anonda6063e60102()]
    81: output_rank = c->UnknownDim();    [in __anonda6063e60102()]
    85: c->set_output(0, c->Matrix(c->UnknownDim(), output_rank));    [in __anonda6063e60102()]
    87: c->set_output(2, c->Vector(output_rank));    [in __anonda6063e60102()]

/external/tensorflow/tensorflow/lite/kernels/

batch_matmul.cc
    102: bool adj_x, bool adj_y, int output_rank,    [in ResizeOutputTensor(), argument]
    104: TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);    [in ResizeOutputTensor()]
    106: for (int i = 0; i < output_rank - 2; ++i) {    [in ResizeOutputTensor()]
    116: int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;    [in ResizeOutputTensor()]
    117: int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;    [in ResizeOutputTensor()]
    119: output_shape->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index);    [in ResizeOutputTensor()]
    120: output_shape->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index);    [in ResizeOutputTensor()]
    358: const int output_rank = std::max(lhs_rank, rhs_rank);    [in Prepare(), local]
    360: RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));    [in Prepare()]
    362: RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));    [in Prepare()]
    [all …]

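The batch_matmul.cc hits describe the output shape rule: the output rank is max(lhs_rank, rhs_rank), batch dimensions are broadcast after both shapes are extended to that rank, and the last two dimensions are the LHS rows and RHS columns, swapped when adj_x or adj_y is set. A rough Python sketch under those assumptions (no error checking, unlike the real ResizeOutputTensor):

    def batch_matmul_output_shape(lhs_shape, rhs_shape, adj_x=False, adj_y=False):
        output_rank = max(len(lhs_shape), len(rhs_shape))
        # Left-pad with 1s so both shapes have output_rank dimensions.
        lhs = [1] * (output_rank - len(lhs_shape)) + list(lhs_shape)
        rhs = [1] * (output_rank - len(rhs_shape)) + list(rhs_shape)
        # Broadcast the batch dimensions (everything but the last two).
        batch = [max(a, b) for a, b in zip(lhs[:-2], rhs[:-2])]
        rows = lhs[-1] if adj_x else lhs[-2]
        cols = rhs[-2] if adj_y else rhs[-1]
        return batch + [rows, cols]

    # batch_matmul_output_shape([5, 2, 3], [3, 4]) -> [5, 2, 4]
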
gather_nd.cc
    92: const int output_rank = indices_rank + params_rank - indices_nd - 1;    [in Prepare(), local]
    93: TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);    [in Prepare()]

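Line 92 states the gather_nd rank rule: output_rank = indices_rank + params_rank - indices_nd - 1, i.e. the leading indices dimensions followed by the params dimensions that are not consumed by the index. A small illustrative helper (not the kernel code):

    def gather_nd_output_shape(params_shape, indices_shape):
        indices_nd = indices_shape[-1]  # how many params dims each index addresses
        # output_rank = indices_rank + params_rank - indices_nd - 1
        return list(indices_shape[:-1]) + list(params_shape[indices_nd:])

    # gather_nd_output_shape([10, 20, 30], [5, 2]) -> [5, 30]
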
embedding_lookup_sparse.cc
    168: const int output_rank = (lookup_rank - 1) + (embedding_rank - 1);    [in Eval(), local]
    175: TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);    [in Eval()]

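Line 168 gives the rank rule for the sparse embedding lookup: the output drops the innermost lookup dimension and the vocabulary dimension of the embedding table. A sketch of the corresponding shape, assuming the remaining dimensions are kept in order (illustrative names, not the kernel's):

    def embedding_lookup_sparse_output_shape(dense_shape, embedding_shape):
        # output_rank = (lookup_rank - 1) + (embedding_rank - 1):
        # every lookup dim except the innermost, then every embedding dim
        # except the vocabulary dim.
        return list(dense_shape[:-1]) + list(embedding_shape[1:])

    # ids with dense shape [4, 6] and a [vocab, 32] table -> [4, 32]
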
/external/tensorflow/tensorflow/compiler/xla/service/

gather_expander.cc
    254: int64 output_rank) {    [in PermuteBatchAndOffsetDims(), argument]
    256: permutation.reserve(output_rank);    [in PermuteBatchAndOffsetDims()]
    259: int64 offset_idx_counter = output_rank - offset_dims.size();    [in PermuteBatchAndOffsetDims()]
    260: for (int64 i = 0; i < output_rank; i++) {    [in PermuteBatchAndOffsetDims()]
    331: int64 output_rank = output_shape.dimensions_size();    [in ExpandInstruction(), local]
    378: output_rank);    [in ExpandInstruction()]

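PermuteBatchAndOffsetDims appears to assign batch dimensions to the leading output positions and offset dimensions to the trailing ones (offset_idx_counter starts at output_rank - offset_dims.size()), preserving relative order. One plausible reading of that loop in Python, for intuition only; the actual transpose semantics are in gather_expander.cc:

    def batch_and_offset_permutation(output_rank, offset_dims):
        # Batch dims (everything not in offset_dims) take the leading slots,
        # offset dims the trailing slots, each group keeping its order.
        offset_dims = set(offset_dims)
        batch_idx = 0
        offset_idx = output_rank - len(offset_dims)
        permutation = []
        for i in range(output_rank):
            if i in offset_dims:
                permutation.append(offset_idx)
                offset_idx += 1
            else:
                permutation.append(batch_idx)
                batch_idx += 1
        return permutation

    # batch_and_offset_permutation(4, [1, 3]) -> [0, 2, 1, 3]
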
instruction_fusion.cc
    202: int64 output_rank = 0;    [in EffectivelyAtMostUnary(), local]
    205: [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {    [in EffectivelyAtMostUnary()]
    207: output_rank = std::max(output_rank, ShapeUtil::TrueRank(subshape));    [in EffectivelyAtMostUnary()]
    211: hlo->operands(), [output_rank](HloInstruction* operand) {    [in EffectivelyAtMostUnary()]
    220: return ShapeUtil::TrueRank(operand->shape()) >= output_rank;    [in EffectivelyAtMostUnary()]

hlo_evaluator.cc
    1303: const int64 output_rank = output_shape.rank();    [in CheckParameters(), local]
    1304: if (output_rank < fft_rank) {    [in CheckParameters()]
    1309: if (input_rank != output_rank) {    [in CheckParameters()]
    1452: int64 output_rank = output_shape.dimensions_size();    [in IterationSpaceForOutputBatchIndices(), local]
    1453: std::vector<int64> index_base(output_rank, 0);    [in IterationSpaceForOutputBatchIndices()]
    1455: index_count.reserve(output_rank);    [in IterationSpaceForOutputBatchIndices()]
    1456: for (int64 i = 0; i < output_rank; i++) {    [in IterationSpaceForOutputBatchIndices()]
    1463: std::vector<int64>(output_rank, 1)};    [in IterationSpaceForOutputBatchIndices()]
    1469: int64 output_rank, absl::Span<const int64> slice_sizes,    [in IterationSpaceForOutputOffsetIndices(), argument]
    1471: std::vector<int64> index_base(output_rank, 0);    [in IterationSpaceForOutputOffsetIndices()]
    [all …]

shape_inference.cc
    2865: const int64 output_rank = output_shape.rank();    [in InferBroadcastShape(), local]
    2866: if (operand_rank > output_rank) {    [in InferBroadcastShape()]
    2870: operand_rank, output_rank);    [in InferBroadcastShape()]
    2879: if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] >= output_rank) {    [in InferBroadcastShape()]

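The InferBroadcastShape hits validate that the operand rank does not exceed the output rank and that every entry of broadcast_dimensions names a real output dimension. A simplified sketch of just those two checks (the real function also verifies dimension sizes and ordering):

    def check_broadcast_dimensions(operand_rank, output_rank, broadcast_dimensions):
        if operand_rank > output_rank:
            raise ValueError(
                f"operand rank {operand_rank} exceeds output rank {output_rank}")
        for d in broadcast_dimensions:
            # Every mapped operand dimension must name a valid output dimension.
            if d < 0 or d >= output_rank:
                raise ValueError(
                    f"broadcast dimension {d} is outside [0, {output_rank})")
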
dynamic_dimension_inference.cc
    1223: int64 output_rank = hlo->shape().rank();    [in HandleGather(), local]
    1228: for (int64 output_dim = 0; output_dim < output_rank; ++output_dim) {    [in HandleGather()]

/external/tensorflow/tensorflow/compiler/xla/client/lib/

matrix.cc
    380: const int64 output_rank = output_config.size();    [in Einsum(), local]
    483: std::vector<int64> transpose_dims(output_rank);    [in Einsum()]
    516: if (transpose_rank == output_rank) {    [in Einsum()]
    526: new_dims.reserve(output_rank);    [in Einsum()]

/external/tensorflow/tensorflow/compiler/mlir/lite/ir/

tfl_ops.cc
    597: const int64_t output_rank = output_type.getRank();    [in VerifyConcatenationOpTypes(), local]
    600: SmallVector<int64_t, 4> result_dim_sizes_loc(output_rank, -1);    [in VerifyConcatenationOpTypes()]
    619: if (operand_rank != output_rank)    [in VerifyConcatenationOpTypes()]
    622: << output_rank << ", got " << operand_rank;    [in VerifyConcatenationOpTypes()]
    624: for (int64_t dim = 0; dim < output_rank; ++dim) {    [in VerifyConcatenationOpTypes()]
    1062: auto output_rank = outermost_dim + updates_type.getRank() - outer_dims;    [in Verify(), local]
    1063: if (shape_type.getDimSize(0) != output_rank) {    [in Verify()]
    1065: << "shape must be a vector of length " << output_rank;    [in Verify()]
    1068: if (output_type.getRank() != output_rank) {    [in Verify()]
    1071: << output_rank;    [in Verify()]

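VerifyConcatenationOpTypes requires every operand to have the output rank and then walks all output dimensions (lines 619-624). A simplified Python sketch of that kind of concatenation shape check, ignoring dynamic dimensions and the quantization checks in the real verifier:

    def verify_concatenation_shapes(operand_shapes, output_shape, axis):
        output_rank = len(output_shape)
        for shape in operand_shapes:
            if len(shape) != output_rank:
                raise ValueError(
                    f"rank of operand should be {output_rank}, got {len(shape)}")
            for dim in range(output_rank):
                # Every non-concat dimension must agree with the output shape.
                if dim != axis and shape[dim] != output_shape[dim]:
                    raise ValueError(
                        f"dimension {dim}: expected {output_shape[dim]}, got {shape[dim]}")
        if sum(shape[axis] for shape in operand_shapes) != output_shape[axis]:
            raise ValueError("operand sizes along the concat axis do not add up")
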
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/

ir_array.cc
    355: int64 output_rank = shape.rank();    [in SourceIndexOfBroadcast(), local]
    357: int64 min_broadcasted_dimension = output_rank;    [in SourceIndexOfBroadcast()]
    383: for (int64 i = max_broadcasted_dimension + 1; i < output_rank; ++i) {    [in SourceIndexOfBroadcast()]

/external/tensorflow/tensorflow/compiler/xla/

shape_util.cc
    1461: int64 output_rank = output_shape.rank();    [in AlignLayouts(), local]
    1484: for (int64 i = 0, j = 0; i < input_rank || j < output_rank;) {    [in AlignLayouts()]
    1492: j == output_rank) {    [in AlignLayouts()]
    1510: alignment.push_back({input_rank, output_rank});    [in AlignLayouts()]
    1518: output_layout.reserve(output_rank);    [in AlignLayouts()]
    1556: CHECK_EQ(output_layout.size(), output_rank);    [in AlignLayouts()]

/external/tensorflow/tensorflow/python/keras/

backend.py
    5035: output_rank = len(output.shape)
    5037: output_rank = output.shape.ndims
    5038: if output_rank is not None:
    5039: axis %= output_rank
    5040: if axis != output_rank - 1:
    5042: itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
    5056: target_rank is not None and output_rank is not None and
    5057: target_rank != output_rank - 1)
    5070: if update_shape and output_rank >= 3:

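The backend.py hits around line 5040 move a non-trailing class axis to the last position before computing crossentropy; the permutation is the itertools.chain expression on line 5042. As a standalone helper (only the permutation logic, not the surrounding crossentropy code):

    import itertools

    def move_axis_to_last(output_rank, axis):
        # Normalize a negative axis, then build a transpose permutation that
        # keeps every other dimension in place and appends `axis` at the end.
        axis %= output_rank
        if axis == output_rank - 1:
            return list(range(output_rank))
        return list(itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))

    # move_axis_to_last(4, 1) -> [0, 2, 3, 1]
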
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/ir/

tf_ops_n_z.cc
    3149: int64_t output_rank = max_rank_ty.getRank();    [in inferReturnTypes(), local]
    3150: llvm::SmallVector<int64_t, 4> broadcast_shape(output_rank, 1LL);    [in inferReturnTypes()]
    3151: llvm::SmallVector<bool, 4> is_broadcasted(output_rank, false);    [in inferReturnTypes()]
    3155: if (dim < 0 || dim > output_rank) {    [in inferReturnTypes()]

/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/

legalize_hlo.cc
    1195: int64_t output_rank = output.getType().cast<ShapedType>().getRank();    [in IsTFStyleBroadcast(), local]
    1198: output_rank - input_rank);    [in IsTFStyleBroadcast()]

/external/tensorflow/tensorflow/compiler/xla/service/spmd/

dot_handler.cc
    341: int64 output_rank) {    [in ComputeDimensionIndexMapping(), argument]
    346: std::vector<int64> output_to_lhs_indices(output_rank, -1);    [in ComputeDimensionIndexMapping()]
    347: std::vector<int64> output_to_rhs_indices(output_rank, -1);    [in ComputeDimensionIndexMapping()]

/external/llvm-project/mlir/include/mlir/Dialect/Linalg/IR/

LinalgStructuredOps.td
    364: // * "image" dimensions (xs with #xs = #zs = output_rank - #bs - #ks)

/external/tensorflow/tensorflow/core/grappler/optimizers/

constant_folding.cc
    765: const int output_rank =    [in MaterializeReductionIndices(), local]
    768: bool full_reduction = output_rank == 0 || num_reduction_indices == input_rank;    [in MaterializeReductionIndices()]

/external/tensorflow/tensorflow/compiler/xla/client/

xla_builder.cc
    2144: const int64 output_rank = output_shape.rank();    [in Map(), local]
    2150: if (rank != output_rank) {    [in Map()]