/external/tensorflow/tensorflow/core/api_def/base_api/
  api_def_SpaceToBatchND.pbtxt
    27: input according to `paddings` to produce `padded` of shape `padded_shape`.
    32: [padded_shape[1] / block_shape[0],
    35: padded_shape[M] / block_shape[M-1],
    44: [padded_shape[1] / block_shape[0],
    46: padded_shape[M] / block_shape[M-1]] +
    53: [padded_shape[1] / block_shape[0],
    55: padded_shape[M] / block_shape[M-1]] +

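The quoted doc fragments describe the SpaceToBatchND shape relationship: the input is padded to `padded_shape`, each padded block dimension is divided by the corresponding `block_shape` entry, and the batch dimension is multiplied by the product of `block_shape`. A minimal Python check of that relationship (the concrete sizes below are made up for illustration):

    import tensorflow as tf

    # Hypothetical 1x4x4x1 input, block_shape=[2, 2], no extra padding.
    x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
    y = tf.space_to_batch_nd(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])

    # Output shape: [batch * prod(block_shape)] +
    #               [padded_shape[i+1] / block_shape[i] for each block dim] + remaining dims.
    print(y.shape)  # (4, 2, 2, 1)
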
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
  spacetobatch_op.cc  (all matches in SpaceToBatch())
    54: std::vector<int64> padded_shape(input_shape.begin(), input_shape.end());  (local)
    65: padded_shape[1 + i] += pad_start + pad_end;
    92: OP_REQUIRES(ctx, padded_shape[1 + i] % block_shape[i] == 0,
    94: "]=", padded_shape[1 + i],
    98: reshaped_padded_shape[1 + i * 2] = padded_shape[1 + i] / block_shape[i];
    139: output_shape[1 + i] = padded_shape[1 + i] / block_shape[i];

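These lines show the shape bookkeeping in the XLA SpaceToBatch kernel: each block dimension is grown by its start/end padding, required to be divisible by the block size, and then divided by it. A rough shape-only sketch of that logic (function and variable names are mine, not the kernel's; the real code builds XLA ops rather than computing Python lists):

    def space_to_batch_output_shape(input_shape, block_shape, paddings):
        # Grow each block dimension by its padding and require divisibility.
        padded = list(input_shape)
        for i, block in enumerate(block_shape):
            pad_start, pad_end = paddings[i]
            padded[1 + i] += pad_start + pad_end
            if padded[1 + i] % block != 0:
                raise ValueError(f"padded_shape[{1 + i}]={padded[1 + i]} is not divisible "
                                 f"by block_shape[{i}]={block}")
        # Output: batch times prod(block_shape), each block dim divided by its block size.
        out = list(padded)
        out[0] = input_shape[0]
        for i, block in enumerate(block_shape):
            out[0] *= block
            out[1 + i] = padded[1 + i] // block
        return out

    print(space_to_batch_output_shape([1, 4, 4, 1], [2, 2], [[0, 0], [0, 0]]))  # [4, 2, 2, 1]
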
/external/tensorflow/tensorflow/lite/toco/graph_transformations/
  resolve_constant_slice.cc  (all matches in Slice())
    66: Shape padded_shape = input_array.shape();  (local)
    67: while (padded_shape.dimensions_count() < 4) {
    68: padded_shape.mutable_dims()->insert(padded_shape.mutable_dims()->begin(),
    78: input_data[Offset(padded_shape, {in_b, in_h, in_w, in_d})];

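Here `padded_shape` is not about pad values at all: the TOCO slice resolver extends the shape to rank 4 by inserting leading dimensions (presumably of size 1, the usual convention; the inserted value is cut off in the match) so the fixed four-index Offset() computation can be used. A small sketch of that idea (the function name is mine):

    def pad_shape_to_rank4(shape):
        # Prepend leading dimensions until the shape is rank 4.
        dims = list(shape)
        while len(dims) < 4:
            dims.insert(0, 1)  # assumed size-1 leading dims
        return dims

    print(pad_shape_to_rank4([5, 3]))  # [1, 1, 5, 3]
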
/external/tensorflow/tensorflow/core/kernels/data/
  padded_batch_dataset_op.cc
    in GetNextInternal():
      246: const PartialTensorShape& padded_shape =  (local)
      249: for (int dim = 0; dim < padded_shape.dims(); ++dim) {
      250: if (padded_shape.dim_size(dim) == -1) {
      253: batch_component_shape.AddDim(padded_shape.dim_size(dim));
      262: if (element_shape.dims() != padded_shape.dims()) {
      266: component_index, ": expected rank ", padded_shape.dims(),
      269: for (int dim = 0; dim < padded_shape.dims(); ++dim) {
      270: if (padded_shape.dim_size(dim) == -1) {
    in MakeDataset():
      428: PartialTensorShape padded_shape;  (local)
      431: padded_shape_t.NumElements(), &padded_shape));
    [all …]

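The matched lines are the core of PaddedBatchDataset's shape handling: a padded dimension of -1 means "pad this dimension to the longest element in the current batch", a known size is used as-is, and every element must have the same rank as its padded shape. A shape-only sketch of that rule (the helper name is mine, not the op's):

    def resolve_batch_component_shape(padded_shape, element_shapes):
        # -1 -> pad to the max over the batch; otherwise use the requested size.
        resolved = []
        for dim, size in enumerate(padded_shape):
            if size == -1:
                resolved.append(max(shape[dim] for shape in element_shapes))
            else:
                resolved.append(size)
        return resolved

    # Two rank-1 elements of lengths 3 and 5 with padded_shape=[-1] -> component shape [5].
    print(resolve_batch_component_shape([-1], [[3], [5]]))  # [5]
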
  padded_batch_dataset_op_test.cc  (all matches in GetInputTensors())
    54: for (auto& padded_shape : padded_shapes_) {  (local)
    55: input_tensors.emplace_back(padded_shape);

/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/
  padded_batch_dataset_serialization_test.py
    56: padded_shape = [-1]
    60: padded_shapes=(padded_shape, padded_shape),

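For context, the pattern exercised by this test, `padded_shapes=(padded_shape, padded_shape)` with `padded_shape = [-1]`, is ordinary tf.data usage; a small standalone example (the pipeline below is illustrative, not the test's):

    import tensorflow as tf

    # Pairs of variable-length vectors; [-1] pads each component to the longest
    # vector in its batch.
    ds = tf.data.Dataset.range(1, 5).map(
        lambda n: (tf.range(n), tf.range(n, dtype=tf.float32)))
    padded_shape = [-1]
    ds = ds.padded_batch(2, padded_shapes=(padded_shape, padded_shape))
    for ints, floats in ds:
        print(ints.shape, floats.shape)  # (2, 2) then (2, 4)
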
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
  tree_reduction_rewriter.cc  (all matches in RewriteReduction())
    131: Shape padded_shape =  (local)
    133: VLOG(3) << "Generated padded shape: " << padded_shape.ToString();
    135: padded_shape, input, initial_value, padding_config));

/external/tensorflow/tensorflow/core/kernels/
  conv_ops_fused_image_transform.cc  (all matches in Compute())
    709: TensorShape padded_shape;  (local)
    738: padded_shape.AddDim(before + resized_shape.dim_size(d) + after);
    761: OP_REQUIRES(context, padded_shape.dims() == 4,
    763: padded_shape.DebugString()));
    779: const int64 in_depth = padded_shape.dim_size(3);
    790: const int64 padded_rows_raw = padded_shape.dim_size(1);
    801: const int64 padded_cols_raw = padded_shape.dim_size(2);
    811: const int64 batch_raw = padded_shape.dim_size(0);

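In the fused resize-and-pad convolution, `padded_shape` is accumulated one dimension at a time as before + size + after, required to be rank 4, and then read back as NHWC batch/rows/cols/depth (dim_size 0/1/2/3 above). A shape-only sketch of that bookkeeping (names are mine):

    def padded_nhwc_dims(resized_shape, paddings):
        # before + size + after for each dimension, then unpack as NHWC.
        padded = [before + size + after
                  for (before, after), size in zip(paddings, resized_shape)]
        assert len(padded) == 4, "the fused path expects a rank-4 (NHWC) tensor"
        batch, rows, cols, depth = padded
        return batch, rows, cols, depth

    # A 1x28x28x3 image padded by 2 rows/cols on each side -> (1, 32, 32, 3).
    print(padded_nhwc_dims([1, 28, 28, 3], [(0, 0), (2, 2), (2, 2), (0, 0)]))
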
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
  lower_tf.cc  (all matches in matchAndRewrite())
    876: auto padded_shape = llvm::to_vector<4>(input_shape);  (local)
    885: padded_shape[i + 1] = (paddings_sum + input_shape[i + 1]);
    890: padded_shape[i + 1] = ShapedType::kDynamicSize;
    896: RankedTensorType::get(padded_shape, rewriter.getF32Type());
    946: auto padded_shape_i = padded_shape[1 + i];
    958: SmallVector<int64_t, 6> reshaped_shape_ints{padded_shape[0]};
    968: reshaped_shape_ints.push_back(padded_shape[i]);

/external/tensorflow/tensorflow/python/data/ops/
  dataset_ops.py
    4083: def _is_padded_shape_compatible_with(padded_shape, input_component_shape):  (argument)
    4095: if padded_shape.dims is None or input_component_shape.dims is None:
    4097: if len(padded_shape.dims) != len(input_component_shape.dims):
    4100: padded_shape.dims, input_component_shape.dims):
    4107: def _padded_shape_to_tensor(padded_shape, input_component_shape):  (argument)
    4126: padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
    4136: ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
    4140: "shape was %s." % (padded_shape, ret.shape)), sys.exc_info()[2])
    4146: "its element type was %s." % (padded_shape, ret.dtype.name)),
    4233: for input_component_shape, padded_shape in zip(
    [all …]

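The two helpers matched here validate and canonicalize user-supplied padded shapes: `_is_padded_shape_compatible_with` checks a padded shape against the input component's shape, and `_padded_shape_to_tensor` converts it to an int64 vector tensor, raising descriptive errors on failure. A rough restatement of the compatibility rule (this is my reading of the visible lines, not the library code, and it glosses over partially known dimensions):

    def is_padded_shape_compatible_with(padded_dims, input_dims):
        # Unknown ranks are accepted; otherwise ranks must match and each known
        # padded dimension must be at least as large as the input dimension.
        if padded_dims is None or input_dims is None:
            return True
        if len(padded_dims) != len(input_dims):
            return False
        for padded, inp in zip(padded_dims, input_dims):
            if padded is not None and inp is not None and padded < inp:
                return False
        return True

    print(is_padded_shape_compatible_with([None, 10], [None, 8]))  # True
    print(is_padded_shape_compatible_with([4], [6]))               # False
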
/external/tensorflow/tensorflow/compiler/xla/service/spmd/
  spmd_partitioner_util.cc
    in PadToShape():
      250: HloInstruction* PadToShape(HloInstruction* hlo, const Shape& padded_shape,  (argument)
      253: if (ShapeUtil::Compatible(hlo->shape(), padded_shape)) {
      257: for (int64 i = 0; i < padded_shape.rank(); ++i) {
      261: padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) -
      273: HloInstruction::CreatePad(padded_shape, hlo, zero, padding_config));
    in PadFromPartialReplicateShape():
      613: auto padded_shape = result->shape();  (local)
      618: padded_shape.set_dimensions(dim, result->shape().dimensions(dim) +
      623: HloInstruction::CreatePad(padded_shape, result, zero, pad_config));

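`PadToShape` is the SPMD partitioner's generic helper: if the instruction's shape already matches the target, it is returned unchanged; otherwise every dimension is padded on the high edge up to the target. The same idea expressed with NumPy (a sketch of the semantics, not the HLO-building code):

    import numpy as np

    def pad_to_shape(array, padded_shape, pad_value=0):
        # Pad each dimension on the high side up to the requested size.
        if tuple(array.shape) == tuple(padded_shape):
            return array
        pad_width = [(0, target - current)
                     for current, target in zip(array.shape, padded_shape)]
        return np.pad(array, pad_width, constant_values=pad_value)

    x = np.ones((2, 3))
    print(pad_to_shape(x, (4, 3)).shape)  # (4, 3); the two new rows are zeros
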
  dot_handler.cc
    in PartitionBaseCase():
      799: auto padded_shape = padded_slice_operand->shape();  (local)
      801: padded_shape.set_dimensions(padding_dim, 2);
      812: padded_shape, padded_slice_operand, min, ccw_pad_config));
      823: padded_shape, padded_slice_operand, min, cw_pad_config));
    in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions():
      3062: Shape padded_shape = user_gte->shape();  (local)
      3158: padded_shape.dimensions(operand_dim));
      3218: std::vector<HloInstruction*> slice_offsets(padded_shape.rank());
      3233: i, padded_shape.dimensions(broadcast->dimensions(i)));
      3239: ShapeUtil::ChangeElementType(padded_shape,
      3246: ShapeUtil::ChangeElementType(padded_shape,
    [all …]

  spmd_partitioner.cc
    in ReshardAsWindowedInput():
      578: auto padded_shape = base_shape_;  (local)
      639: padded_shape.set_dimensions(
      751: padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) -
      755: auto padded_hlo = ShapeUtil::Compatible(padded_shape, base_shape_)
      758: padded_shape, hlo_, pad_value, padding_config));
      807: padded_shape.dimensions(dim), shard_shape.dimensions(dim), dim, target,
    in ReshardWithAllToAll():
      1142: auto padded_shape = hlo_->shape();  (local)
      1143: padded_shape.set_dimensions(
      1145: RoundUpToNearest(padded_shape.dimensions(target_dim),
      1147: auto padded_hlo = PadToShape(hlo_, padded_shape, state_.b);

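In `ReshardWithAllToAll`, the target dimension is first rounded up to a multiple of the partition count (RoundUpToNearest) and the operand is then padded to that size with `PadToShape`, so it splits evenly across participants. The rounding itself is just ceiling division; for example:

    def round_up_to_nearest(value, multiple):
        # Smallest multiple of `multiple` that is >= value.
        return -(-value // multiple) * multiple

    # A dimension of size 10 resharded across 4 partitions is padded to 12 first.
    print(round_up_to_nearest(10, 4))  # 12
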
  spmd_partitioner_util.h
    116: HloInstruction* PadToShape(HloInstruction* hlo, const Shape& padded_shape,

/external/tensorflow/tensorflow/compiler/xla/service/
  dynamic_padder.cc  (all matches in RewriteInputWithDynamicPadding())
    752: Shape padded_shape = input->shape();  (local)
    787: padded_shape.mutable_dimensions()[shape_dim] =
    789: window_util::DilatedBound(padded_shape.dimensions(shape_dim),
    802: padded_shape, pad, start_indices, padded_shape.dimensions()));

/external/tensorflow/tensorflow/compiler/mlir/tosa/transforms/
  legalize_common.cc  (all matches in convertSpaceToBatchNDOp())
    843: SmallVector<int64_t, 2> padded_shape(input_rank);  (local)
    851: padded_shape[0] = input_shape[0];
    872: padded_shape[i + 1] = input_shape[i + 1] + lo_pad + hi_pad;
    879: padded_shape[i + block_rank + 1] = input_shape[i + block_rank + 1];
    893: RankedTensorType::get(ArrayRef<int64_t>(padded_shape),
    914: a2_shape[1 + i * 2 + 0] = padded_shape[1 + i] / block_shape_val;
    983: a4_reshape_shape[i + 1] = padded_shape[i + 1] / block_shape_val;

/external/tensorflow/tensorflow/python/tpu/
  tpu.py
    1064: padded_shape = padded_shapes[idx]
    1067: if any(need_padding[idx]) and padded_shape is not None:
    1081: for i, s in enumerate(padded_shape.dims):

/external/tensorflow/tensorflow/stream_executor/tpu/
  tpu_executor_c_api.h
    342: XLA_Shape* padded_shape,

/external/tensorflow/tensorflow/compiler/mlir/tosa/g3doc/
  legalization.md
    729: // [batch + padded_shape[1] / block_shape[0], block_shape[0], ...
    730: // padded_shape[M] / block_shape[M-1], block_shape[M-1]] +
    749: // [padded_shape[1] / block_shape[0],
    751: // [padded_shape[M] / block_shape[M-1]] +
    771: // [ padded_shape[1] / block_shape[0],
    773: // padded_shape[M] / block_shape[M-1]] +

/external/tensorflow/tensorflow/python/ops/
  image_ops_impl.py
    1092: padded_shape = [
    1096: padded.set_shape(padded_shape)

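These two lines set a fully static shape on a padded image; the surrounding code (most likely the pad_to_bounding_box path, though only these two lines are matched) computes each padded dimension from the offsets and target sizes. A small usage example of that public API:

    import tensorflow as tf

    # Pad a 28x28x3 image into a 32x32 canvas at offset (2, 2); the result carries
    # the padded static shape, as set by the set_shape call above.
    image = tf.zeros([28, 28, 3])
    padded = tf.image.pad_to_bounding_box(image, offset_height=2, offset_width=2,
                                          target_height=32, target_width=32)
    print(padded.shape)  # (32, 32, 3)
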
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/ir/
  tf_generated_ops.td
    13988: input according to `paddings` to produce `padded` of shape `padded_shape`.
    13993: [padded_shape[1] / block_shape[0],
    13996: padded_shape[M] / block_shape[M-1],
    14005: [padded_shape[1] / block_shape[0],
    14007: padded_shape[M] / block_shape[M-1]] +
    14014: [padded_shape[1] / block_shape[0],
    14016: padded_shape[M] / block_shape[M-1]] +