/external/tensorflow/tensorflow/core/kernels/
reshape_util.cc
     51  const int64 output_rank = target_shape_in.NumElements();  in Reshape() local
     62  for (int d = 0; d < output_rank; ++d) {  in Reshape()
    117  gtl::InlinedVector<int64, 8> output_strides(output_rank);  in Reshape()
    118  if (output_rank > 0) {  in Reshape()
    119  output_strides[output_rank - 1] = 1;  in Reshape()
    120  for (int d = output_rank - 2; d >= 0; --d) {  in Reshape()
    128  TensorShape({nnz, output_rank}),  in Reshape()
    137  for (int j = 0; j < output_rank; ++j) {  in Reshape()
    145  TensorShape({output_rank}),  in Reshape()
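The reshape_util.cc hits show the core of sparse Reshape: build row-major strides for the target shape, then decompose each of the nnz flattened positions into an output coordinate of length output_rank. A minimal standalone sketch of that stride arithmetic, using plain std::vector instead of TensorFlow's gtl::InlinedVector and made-up dimension values:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // Hypothetical target shape for the reshaped sparse tensor.
      const std::vector<int64_t> output_shape = {3, 4, 5};
      const int64_t output_rank = static_cast<int64_t>(output_shape.size());

      // Row-major strides, built back-to-front as in the snippet above.
      std::vector<int64_t> output_strides(output_rank);
      if (output_rank > 0) {
        output_strides[output_rank - 1] = 1;
        for (int64_t d = output_rank - 2; d >= 0; --d) {
          output_strides[d] = output_strides[d + 1] * output_shape[d + 1];
        }
      }

      // A flat position (as derived from the input indices and input strides)
      // is decomposed into an output coordinate of length output_rank.
      int64_t flat = 37;
      std::vector<int64_t> coord(output_rank);
      for (int64_t j = 0; j < output_rank; ++j) {
        coord[j] = flat / output_strides[j];
        flat %= output_strides[j];
      }
      for (int64_t c : coord) std::cout << c << ' ';  // prints "1 3 2" for 37 in a 3x4x5 shape
      std::cout << '\n';
      return 0;
    }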
deserialize_sparse_variant_op.cc
    155  const int output_rank = output_shape->NumElements();  in Compute() local
    158  0, {static_cast<int64>(total_non_zeros), output_rank},  in Compute()
    177  i, output_rank - input_dims_to_stack,  in Compute()
    205  if (input_dims_to_stack == 1 && output_rank == 2) {  in Compute()
    239  for (size_t k = input_dims_to_stack; k < output_rank; ++k) {  in Compute()
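Here the stacked output's indices matrix is allocated with shape {total_non_zeros, output_rank}, and each input tensor's indices fill the trailing output_rank - input_dims_to_stack columns (lines 177 and 239), with the leading columns recording the input's position in the stacked batch. A sketch of assembling one output index row under that reading; the helper name and example values are illustrative, not part of the op:

    #include <cstdint>
    #include <vector>

    // Builds one row of the stacked indices matrix: the coordinates of the
    // source tensor within the batch, followed by that tensor's own index.
    // Assumes element_index.size() == output_rank - batch_coords.size().
    std::vector<int64_t> StackedIndexRow(const std::vector<int64_t>& batch_coords,
                                         const std::vector<int64_t>& element_index) {
      std::vector<int64_t> row = batch_coords;
      row.insert(row.end(), element_index.begin(), element_index.end());
      return row;
    }

    int main() {
      // Stacking along one leading dimension (input_dims_to_stack == 1):
      // element [4, 7] of the tensor at batch position 2 becomes [2, 4, 7].
      std::vector<int64_t> row = StackedIndexRow({2}, {4, 7});
      return row.size() == 3 ? 0 : 1;
    }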
/external/tensorflow/tensorflow/contrib/layers/python/layers/
feature_column_ops.py
     42  def _maybe_reshape_input_tensor(tensor, column_name, output_rank):  argument
     69  if output_rank > input_rank + 1:
     75  column_name, input_rank, output_rank))
     76  elif output_rank == input_rank + 1:
     88  elif output_rank < input_rank:
     89  return layers._inner_flatten(tensor, output_rank)  # pylint: disable=protected-access
     99  output_rank,  argument
    121  if output_rank == 3:
    126  output_rank=output_rank), transformed_tensor)
    137  output_rank=output_rank))
    [all …]
feature_column.py
    254  output_rank=2):  argument
    428  output_rank=2):  argument
    824  output_rank=2):  argument
    968  output_rank=2):  argument
    989  sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
   1018  return math_ops.reduce_sum(one_hot_id_tensor, axis=[output_rank - 1])
   1197  output_rank=2):  argument
   1215  input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
   1218  weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
   1631  def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):  argument
    [all …]
feature_column_test.py
    339  for output_rank in range(1, len(id_tensor_shape) + 1):
    340  with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
    342  id_tensor, output_rank=output_rank)
    345  expected_shape = (id_tensor_shape[:output_rank - 1] + [vocab_size])
    366  output_rank = len(id_tensor_shape)
    375  output_rank=output_rank)
    601  for output_rank in range(1, 3 + len(dimensions)):
    602  with variable_scope.variable_scope("output_rank_{}".format(output_rank)):
    605  output_rank=output_rank)
    609  input_shape[:output_rank - 1] +
    [all …]
/external/tensorflow/tensorflow/core/ops/
set_ops.cc
     53  DimensionHandle output_rank;  in __anon967df2c80102() local
     74  output_rank = c->MakeDim(input0_rank);  in __anon967df2c80102()
     79  output_rank = c->MakeDim(c->Rank(input1_shape));  in __anon967df2c80102()
     81  output_rank = c->UnknownDim();  in __anon967df2c80102()
     85  c->set_output(0, c->Matrix(c->UnknownDim(), output_rank));  in __anon967df2c80102()
     87  c->set_output(2, c->Vector(output_rank));  in __anon967df2c80102()
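The set-op shape function picks output_rank from whichever input's rank is known (unknown otherwise) and then emits the usual sparse-tensor outputs: an [nnz, output_rank] indices matrix and an output_rank-length dense-shape vector. A standalone sketch of that convention, using -1 for unknown dimensions in place of InferenceContext's DimensionHandle; the struct and helper are illustrative only:

    #include <cstdint>
    #include <vector>

    struct SparseOutputShapes {
      std::vector<int64_t> indices;  // [nnz, output_rank]
      std::vector<int64_t> values;   // [nnz]
      std::vector<int64_t> shape;    // [output_rank]
    };

    // -1 stands in for an unknown dimension or unknown rank.
    SparseOutputShapes SetOpOutputShapes(int64_t input0_rank, int64_t input1_rank) {
      int64_t output_rank = input0_rank >= 0 ? input0_rank
                            : input1_rank >= 0 ? input1_rank
                                               : -1;
      const int64_t unknown_nnz = -1;
      return {{unknown_nnz, output_rank}, {unknown_nnz}, {output_rank}};
    }

    int main() {
      // Only the second input's rank is known, so output_rank follows it.
      SparseOutputShapes s = SetOpOutputShapes(/*input0_rank=*/-1, /*input1_rank=*/3);
      return s.shape[0] == 3 ? 0 : 1;
    }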
/external/tensorflow/tensorflow/compiler/xla/service/
gather_expander.cc
    254  int64 output_rank) {  in PermuteBatchAndOffsetDims() argument
    256  permutation.reserve(output_rank);  in PermuteBatchAndOffsetDims()
    259  int64 offset_idx_counter = output_rank - offset_dims.size();  in PermuteBatchAndOffsetDims()
    260  for (int64 i = 0; i < output_rank; i++) {  in PermuteBatchAndOffsetDims()
    316  int64 output_rank = output_shape.dimensions_size();  in ExpandInstruction() local
    369  output_rank);  in ExpandInstruction()
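PermuteBatchAndOffsetDims builds a permutation over the output_rank result dimensions; reading the snippet, the expander's intermediate gather result carries batch dimensions first and offset dimensions last, and the permutation moves each offset dimension to the position listed in offset_dims. A self-contained sketch under that reading, assuming offset_dims is sorted and holds the final positions of the offset dimensions:

    #include <cstdint>
    #include <vector>

    // Returns a permutation p such that result dimension i is taken from
    // canonical dimension p[i], where the canonical layout is
    // [batch dims..., offset dims...].
    std::vector<int64_t> PermutationForBatchAndOffsetDims(
        const std::vector<int64_t>& offset_dims, int64_t output_rank) {
      std::vector<int64_t> permutation;
      permutation.reserve(output_rank);
      int64_t batch_idx_counter = 0;
      int64_t offset_idx_counter = output_rank - static_cast<int64_t>(offset_dims.size());
      size_t next_offset = 0;
      for (int64_t i = 0; i < output_rank; i++) {
        bool is_offset_dim =
            next_offset < offset_dims.size() && offset_dims[next_offset] == i;
        if (is_offset_dim) {
          permutation.push_back(offset_idx_counter++);
          ++next_offset;
        } else {
          permutation.push_back(batch_idx_counter++);
        }
      }
      return permutation;
    }

    int main() {
      // output_rank = 4, offset dims end up at positions 1 and 3:
      // expected permutation {0, 2, 1, 3}.
      std::vector<int64_t> p = PermutationForBatchAndOffsetDims({1, 3}, 4);
      return (p == std::vector<int64_t>{0, 2, 1, 3}) ? 0 : 1;
    }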
instruction_fusion.cc
    173  int64 output_rank = 0;  in EffectivelyAtMostUnary() local
    176  [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {  in EffectivelyAtMostUnary()
    178  output_rank = std::max(output_rank, ShapeUtil::TrueRank(subshape));  in EffectivelyAtMostUnary()
    182  hlo->operands(), [output_rank](HloInstruction* operand) {  in EffectivelyAtMostUnary()
    191  return ShapeUtil::TrueRank(operand->shape()) >= output_rank;  in EffectivelyAtMostUnary()
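EffectivelyAtMostUnary takes output_rank as the largest "true" rank (rank ignoring degenerate size-1 dimensions) across the output subshapes, then counts how many operands reach that rank. A simplified sketch over plain dimension vectors; the real routine works on HloInstruction operands and has extra carve-outs not shown here:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Rank after dropping degenerate (size-1) dimensions.
    int64_t TrueRank(const std::vector<int64_t>& dims) {
      return std::count_if(dims.begin(), dims.end(),
                           [](int64_t d) { return d > 1; });
    }

    // True if at most one operand is as "large" (by true rank) as the output,
    // so the op behaves roughly like a unary op for fusion-cost purposes.
    bool EffectivelyAtMostUnary(const std::vector<std::vector<int64_t>>& operand_shapes,
                                const std::vector<int64_t>& output_shape) {
      int64_t output_rank = TrueRank(output_shape);
      int64_t big_operands = std::count_if(
          operand_shapes.begin(), operand_shapes.end(),
          [output_rank](const std::vector<int64_t>& s) {
            return TrueRank(s) >= output_rank;
          });
      return big_operands <= 1;
    }

    int main() {
      // A [16, 16] output fed by one [16, 16] operand and one broadcast-like
      // [1, 16] operand counts as effectively unary.
      return EffectivelyAtMostUnary({{16, 16}, {1, 16}}, {16, 16}) ? 0 : 1;
    }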
hlo_evaluator.cc
    783  int64 output_rank = output_shape.dimensions_size();  in IterationSpaceForOutputBatchIndices() local
    784  std::vector<int64> index_base(output_rank, 0);  in IterationSpaceForOutputBatchIndices()
    786  index_count.reserve(output_rank);  in IterationSpaceForOutputBatchIndices()
    787  for (int64 i = 0; i < output_rank; i++) {  in IterationSpaceForOutputBatchIndices()
    794  std::vector<int64>(output_rank, 1)};  in IterationSpaceForOutputBatchIndices()
    800  int64 output_rank, absl::Span<const int64> slice_sizes,  in IterationSpaceForOutputOffsetIndices() argument
    802  std::vector<int64> index_base(output_rank, 0);  in IterationSpaceForOutputOffsetIndices()
    803  std::vector<int64> index_count(output_rank, 1);  in IterationSpaceForOutputOffsetIndices()
    805  for (int64 i = 0; i < output_rank; i++) {  in IterationSpaceForOutputOffsetIndices()
    818  std::vector<int64>(output_rank, 1)};  in IterationSpaceForOutputOffsetIndices()
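The two helpers define the loop bounds the evaluator uses to walk a gather output: in the batch-index space each batch dimension runs over its full extent while offset dimensions are pinned to a single step, and in the offset-index space the counts come from the slice sizes instead. A sketch of the batch-index case only, assuming offset_dims lists which output dimensions are offset dimensions:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct IterationSpace {
      std::vector<int64_t> index_base;   // loop start, all zeros
      std::vector<int64_t> index_count;  // trip count per dimension
      std::vector<int64_t> index_incr;   // step per dimension, all ones
    };

    // Iterates the batch dimensions of the gather output in full and pins the
    // offset dimensions to a single iteration.
    IterationSpace IterationSpaceForOutputBatchIndices(
        const std::vector<int64_t>& output_dims,
        const std::vector<int64_t>& offset_dims) {
      int64_t output_rank = static_cast<int64_t>(output_dims.size());
      std::vector<int64_t> index_base(output_rank, 0);
      std::vector<int64_t> index_count;
      index_count.reserve(output_rank);
      for (int64_t i = 0; i < output_rank; i++) {
        bool is_offset_dim =
            std::find(offset_dims.begin(), offset_dims.end(), i) != offset_dims.end();
        index_count.push_back(is_offset_dim ? 1 : output_dims[i]);
      }
      return {index_base, index_count, std::vector<int64_t>(output_rank, 1)};
    }

    int main() {
      // Output shape [5, 8, 7] with offset dims {1, 2}: only dim 0 is iterated.
      IterationSpace space = IterationSpaceForOutputBatchIndices({5, 8, 7}, {1, 2});
      return (space.index_count == std::vector<int64_t>{5, 1, 1}) ? 0 : 1;
    }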
shape_inference.cc
   2631  const int64 output_rank = output_shape.rank();  in InferBroadcastShape() local
   2632  if (operand_rank > output_rank) {  in InferBroadcastShape()
   2636  operand_rank, output_rank);  in InferBroadcastShape()
   2645  if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] >= output_rank) {  in InferBroadcastShape()
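InferBroadcastShape rejects operands whose rank exceeds the output rank and broadcast_dimensions entries that fall outside [0, output_rank). A compact sketch of just those two checks; the real routine additionally verifies that each mapped dimension size matches or is 1:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Returns an empty string on success, otherwise an error message.
    std::string ValidateBroadcastDims(int64_t operand_rank, int64_t output_rank,
                                      const std::vector<int64_t>& broadcast_dimensions) {
      if (operand_rank > output_rank) {
        return "operand rank " + std::to_string(operand_rank) +
               " exceeds output rank " + std::to_string(output_rank);
      }
      for (size_t i = 0; i < broadcast_dimensions.size(); ++i) {
        if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] >= output_rank) {
          return "broadcast_dimensions[" + std::to_string(i) + "] = " +
                 std::to_string(broadcast_dimensions[i]) + " is out of range";
        }
      }
      return "";
    }

    int main() {
      // Broadcasting a rank-2 operand into a rank-3 output along dims {0, 2}.
      return ValidateBroadcastDims(2, 3, {0, 2}).empty() ? 0 : 1;
    }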
/external/tensorflow/tensorflow/compiler/xla/client/lib/
matrix.cc
    169  const int64 output_rank = output_config.size();  in Einsum() local
    232  output_dims.reserve(output_rank);  in Einsum()
    243  std::vector<int64> transpose_dims(output_rank);  in Einsum()
    244  for (int64 i = 0; i < output_rank; ++i) {  in Einsum()
/external/tensorflow/tensorflow/lite/kernels/
gather_nd.cc
     84  const int output_rank = indices_rank + params_rank - indices_nd - 1;  in Prepare() local
     85  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);  in Prepare()
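Line 84 is the usual gather_nd shape rule: the last indices dimension (indices_nd) indexes into params, so the output keeps the leading indices dimensions and the trailing params dimensions. A small sketch that derives the full output shape, not just the rank; the shapes in main are made up:

    #include <cstdint>
    #include <vector>

    // output shape = indices.shape[:-1] + params.shape[indices_nd:],
    // so output rank = (indices_rank - 1) + (params_rank - indices_nd)
    //                = indices_rank + params_rank - indices_nd - 1.
    std::vector<int64_t> GatherNdOutputShape(const std::vector<int64_t>& params_shape,
                                             const std::vector<int64_t>& indices_shape) {
      const int64_t indices_nd = indices_shape.back();
      std::vector<int64_t> out(indices_shape.begin(), indices_shape.end() - 1);
      out.insert(out.end(), params_shape.begin() + indices_nd, params_shape.end());
      return out;
    }

    int main() {
      // params [10, 20, 30], indices [4, 2] (indices_nd == 2) -> output [4, 30].
      std::vector<int64_t> out = GatherNdOutputShape({10, 20, 30}, {4, 2});
      return (out == std::vector<int64_t>{4, 30}) ? 0 : 1;
    }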
embedding_lookup_sparse.cc
    154  const int output_rank = (lookup_rank - 1) + (embedding_rank - 1);  in Eval() local
    161  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);  in Eval()
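The rank here combines the lookup ids' dense shape minus its last dimension with the embedding table's shape minus its first dimension; under that reading the output shape is their concatenation. A small sketch with illustrative shapes:

    #include <cstdint>
    #include <vector>

    // output shape = lookup.shape[:-1] + embedding.shape[1:],
    // so output rank = (lookup_rank - 1) + (embedding_rank - 1).
    std::vector<int64_t> SparseLookupOutputShape(const std::vector<int64_t>& lookup_shape,
                                                 const std::vector<int64_t>& embedding_shape) {
      std::vector<int64_t> out(lookup_shape.begin(), lookup_shape.end() - 1);
      out.insert(out.end(), embedding_shape.begin() + 1, embedding_shape.end());
      return out;
    }

    int main() {
      // Sparse ids with dense shape [8, 5] and an embedding table [1000, 16]
      // would produce an [8, 16] output.
      std::vector<int64_t> out = SparseLookupOutputShape({8, 5}, {1000, 16});
      return (out == std::vector<int64_t>{8, 16}) ? 0 : 1;
    }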
/external/tensorflow/tensorflow/compiler/xla/
shape_util.cc
   1303  int64 output_rank = output_shape.rank();  in AlignLayouts() local
   1326  for (int64 i = 0, j = 0; i < input_rank || j < output_rank;) {  in AlignLayouts()
   1334  j == output_rank) {  in AlignLayouts()
   1352  alignment.push_back({input_rank, output_rank});  in AlignLayouts()
   1360  output_layout.reserve(output_rank);  in AlignLayouts()
   1398  CHECK_EQ(output_layout.size(), output_rank);  in AlignLayouts()
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/
ir_array.cc
    268  int64 output_rank = shape.rank();  in SourceIndexOfBroadcast() local
    270  int64 min_broadcasted_dimension = output_rank;  in SourceIndexOfBroadcast()
    296  for (int64 i = max_broadcasted_dimension + 1; i < output_rank; ++i) {  in SourceIndexOfBroadcast()
/external/tensorflow/tensorflow/core/grappler/optimizers/
constant_folding.cc
    710  const int output_rank =  in MaterializeReductionIndices() local
    713  bool full_reduction = output_rank == 0 || num_reduction_indices == input_rank;  in MaterializeReductionIndices()
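The snippet classifies a reduction as "full" when the output is a scalar or when every input dimension appears in the reduction indices. The same predicate stated standalone:

    #include <cstdint>

    // True when a reduction collapses the whole input, either because the
    // output has rank 0 or because all input dimensions are being reduced.
    bool IsFullReduction(int64_t output_rank, int64_t num_reduction_indices,
                         int64_t input_rank) {
      return output_rank == 0 || num_reduction_indices == input_rank;
    }

    int main() {
      // Reducing both dimensions of a rank-2 input yields a scalar: full reduction.
      return IsFullReduction(/*output_rank=*/0, /*num_reduction_indices=*/2,
                             /*input_rank=*/2) ? 0 : 1;
    }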
/external/tensorflow/tensorflow/compiler/xla/client/
xla_builder.cc
   1759  const int64 output_rank = output_shape.rank();  in Map() local
   1765  if (rank != output_rank) {  in Map()
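The Map builder compares each operand's rank against the inferred output rank and rejects mismatches at line 1765. A tiny sketch of that validation, detached from XlaBuilder; the helper is illustrative:

    #include <cstdint>
    #include <vector>

    // All operands passed to a map must share the output's rank.
    bool AllOperandRanksMatch(const std::vector<int64_t>& operand_ranks,
                              int64_t output_rank) {
      for (int64_t rank : operand_ranks) {
        if (rank != output_rank) return false;
      }
      return true;
    }

    int main() {
      // Two rank-2 operands mapped to a rank-2 output: accepted.
      return AllOperandRanksMatch({2, 2}, 2) ? 0 : 1;
    }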