/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_SpaceToBatchND.pbtxt |
    39 input according to `paddings` to produce `padded` of shape `padded_shape`.
    44 [padded_shape[1] / block_shape[0],
    47 padded_shape[M] / block_shape[M-1],
    56 [padded_shape[1] / block_shape[0],
    58 padded_shape[M] / block_shape[M-1]] +
    65 [padded_shape[1] / block_shape[0],
    67 padded_shape[M] / block_shape[M-1]] +
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | spacetobatch_op.cc |
    55 std::vector<int64_t> padded_shape(input_shape.begin(), input_shape.end()); in SpaceToBatch() local
    70 padded_shape[1 + i] += pad_start + pad_end; in SpaceToBatch()
    106 OP_REQUIRES(ctx, padded_shape[1 + i] % block_shape[i] == 0, in SpaceToBatch()
    108 "]=", padded_shape[1 + i], in SpaceToBatch()
    112 reshaped_padded_shape[1 + i * 2] = padded_shape[1 + i] / block_shape[i]; in SpaceToBatch()
    153 output_shape[1 + i] = padded_shape[1 + i] / block_shape[i]; in SpaceToBatch()
|
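Note: the pbtxt documentation and the XLA kernel above express the same SpaceToBatchND shape rule: each spatial dimension is padded, the padded size must divide evenly by the corresponding block size, and the quotient stays spatial while the batch is multiplied by the product of block_shape. A minimal Python sketch of that arithmetic (illustrative helper, not the kernel code):

import math

def space_to_batch_nd_shapes(input_shape, block_shape, paddings):
    """Returns the (padded_shape, output_shape) implied by SpaceToBatchND."""
    m = len(block_shape)
    batch, spatial, remaining = input_shape[0], input_shape[1:1 + m], input_shape[1 + m:]
    padded_spatial = []
    for dim, block, (pad_start, pad_end) in zip(spatial, block_shape, paddings):
        padded = dim + pad_start + pad_end   # padded_shape[1 + i] += pad_start + pad_end
        assert padded % block == 0, "padded size must be divisible by block_shape[i]"
        padded_spatial.append(padded)
    padded_shape = [batch] + padded_spatial + list(remaining)
    output_shape = ([batch * math.prod(block_shape)] +
                    [p // b for p, b in zip(padded_spatial, block_shape)] +
                    list(remaining))
    return padded_shape, output_shape

# A [1, 4, 4, 1] input with block_shape=[2, 2] and no padding gives a [4, 2, 2, 1] output.
print(space_to_batch_nd_shapes([1, 4, 4, 1], [2, 2], [(0, 0), (0, 0)]))
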
/external/tensorflow/tensorflow/core/kernels/data/ |
D | padded_batch_dataset_op.cc |
    289 const PartialTensorShape& padded_shape = in CopyBatch() local
    292 for (int dim = 0; dim < padded_shape.dims(); ++dim) { in CopyBatch()
    293 if (padded_shape.dim_size(dim) == -1) { in CopyBatch()
    296 batch_component_shape.AddDim(padded_shape.dim_size(dim)); in CopyBatch()
    305 if (element_shape.dims() != padded_shape.dims()) { in CopyBatch()
    309 component_index, ": expected rank ", padded_shape.dims(), in CopyBatch()
    312 for (int dim = 0; dim < padded_shape.dims(); ++dim) { in CopyBatch()
    313 if (padded_shape.dim_size(dim) == -1) { in CopyBatch()
    451 PartialTensorShape padded_shape; in MakeDataset() local
    454 padded_shape_t.NumElements(), &padded_shape)); in MakeDataset()
    [all …]
|
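Note: the CopyBatch lines above show the two shape rules the padded-batch kernel enforces: a padded dimension of -1 is resolved to the longest element in the current batch, and every element must have the same rank as its padded shape. A rough Python sketch of that resolution (hypothetical helper, not the TensorFlow source):

def resolve_batch_component_shape(element_shapes, padded_shape):
    for shape in element_shapes:
        assert len(shape) == len(padded_shape), "element rank must match padded_shape rank"
    resolved = [max(s[d] for s in element_shapes) if p == -1 else p
                for d, p in enumerate(padded_shape)]
    return [len(element_shapes)] + resolved

# Three variable-length vectors batched with padded_shape=[-1] -> [3, 5].
print(resolve_batch_component_shape([(2,), (5,), (3,)], [-1]))
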
D | padded_batch_dataset_op_test.cc |
    54 for (auto& padded_shape : padded_shapes_) { in GetInputTensors() local
    55 input_tensors.emplace_back(padded_shape); in GetInputTensors()
|
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
D | resolve_constant_slice.cc |
    66 Shape padded_shape = input_array.shape(); in Slice() local
    67 while (padded_shape.dimensions_count() < 4) { in Slice()
    68 padded_shape.mutable_dims()->insert(padded_shape.mutable_dims()->begin(), in Slice()
    78 input_data[Offset(padded_shape, {in_b, in_h, in_w, in_d})]; in Slice()
|
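Note: the Slice() lines above use a common TOCO idiom: the input shape is left-extended with 1s until it is 4-D so a single {b, h, w, d} offset computation covers every rank. A tiny sketch of that extension (illustrative only):

def pad_shape_to_4d(shape):
    dims = list(shape)
    while len(dims) < 4:        # mirrors: while (padded_shape.dimensions_count() < 4)
        dims.insert(0, 1)
    return dims

print(pad_shape_to_4d([5, 3]))  # -> [1, 1, 5, 3]
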
/external/ComputeLibrary/src/runtime/NEON/functions/ |
D | NEPadLayer.cpp |
    181 …const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tenso… in configure() local
    183 auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape)); in configure()
    217 …const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(… in validate() local
    221 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape); in validate()
|
/external/ComputeLibrary/tests/validation/reference/ |
D | PadLayer.cpp |
    52 …const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(orig_shape, paddings… in pad_layer() local
    54 SimpleTensor<T> dst(padded_shape, dst_data_type); in pad_layer()
    60 const Coordinates coord = index2coord(padded_shape, idx); in pad_layer()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | conv_ops_fused_image_transform.cc |
    710 TensorShape padded_shape; in Compute() local
    739 padded_shape.AddDim(before + resized_shape.dim_size(d) + after); in Compute()
    762 OP_REQUIRES(context, padded_shape.dims() == 4, in Compute()
    764 padded_shape.DebugString())); in Compute()
    780 const int64_t in_depth = padded_shape.dim_size(3); in Compute()
    791 const int64_t padded_rows_raw = padded_shape.dim_size(1); in Compute()
    802 const int64_t padded_cols_raw = padded_shape.dim_size(2); in Compute()
    812 const int64_t batch_raw = padded_shape.dim_size(0); in Compute()
|
/external/tensorflow/tensorflow/python/data/kernel_tests/ |
D | padded_batch_test.py |
    375 padded_shape = [-1]
    379 padded_shapes=(padded_shape, padded_shape),
    413 padded_shape = [-1]
    417 padded_shapes=(padded_shape, padded_shape),
|
/external/tensorflow/tensorflow/compiler/xla/service/spmd/ |
D | spmd_partitioner_util.h |
    174 HloInstruction* PadToShape(HloInstruction* hlo, const Shape& padded_shape, T* b,
    176 if (ShapeUtil::Compatible(hlo->shape(), padded_shape)) {
    180 for (int64_t i = 0; i < padded_shape.rank(); ++i) {
    184 padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) -
    193 HloInstruction::CreatePad(padded_shape, hlo, padding, padding_config));
|
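Note: PadToShape above is a no-op when the instruction already has the requested shape; otherwise each dimension is padded at the high end by (target - current). A NumPy stand-in for the same idea (the real helper emits an HLO Pad instruction):

import numpy as np

def pad_to_shape(array, padded_shape, pad_value=0):
    if tuple(array.shape) == tuple(padded_shape):
        return array                               # mirrors the ShapeUtil::Compatible early-out
    pad_width = [(0, target - current)             # edge_padding_high = target - current
                 for current, target in zip(array.shape, padded_shape)]
    return np.pad(array, pad_width, constant_values=pad_value)

x = np.arange(6).reshape(2, 3)
print(pad_to_shape(x, (4, 3)).shape)               # -> (4, 3)
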
D | dot_handler.cc |
    1149 auto padded_shape = padded_slice_operand->shape(); in PartitionBaseCase() local
    1151 padded_shape.set_dimensions(padding_dim, 2); in PartitionBaseCase()
    1162 padded_shape, padded_slice_operand, min, ccw_pad_config)); in PartitionBaseCase()
    1173 padded_shape, padded_slice_operand, min, cw_pad_config)); in PartitionBaseCase()
    4016 Shape padded_shape = user_gte->shape(); in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions() local
    4137 padded_shape.dimensions(operand_dim)); in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions()
    4194 motion_clusters.back().slice_offsets.reserve(padded_shape.rank()); in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions()
    4195 for (int64_t i = 0; i < padded_shape.rank(); ++i) { in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions()
    4244 i, padded_shape.dimensions(broadcast->dimensions(i))); in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions()
    4251 ShapeUtil::ChangeElementType(padded_shape, in MoveUsersIntoWindowedDotGeneralLoopOnNonContractingDimensions()
    [all …]
|
D | spmd_partitioner.cc |
    683 Shape padded_shape = base_shape_; in ReshardAsWindowedInput() local
    866 padded_shape.set_dimensions( in ReshardAsWindowedInput()
    968 auto pad_hlo_shape = padded_shape; in ReshardAsWindowedInput()
    983 padding_config_dim->set_edge_padding_high(padded_shape.dimensions(i) - in ReshardAsWindowedInput()
    1109 padded_shape.dimensions(dim), shard_shape.dimensions(dim), dim, in ReshardAsWindowedInput()
    1539 auto padded_shape = hlo_->shape(); in ReshardWithAllToAll() local
    1540 padded_shape.set_dimensions( in ReshardWithAllToAll()
    1541 target_dim, RoundUpTo(padded_shape.dimensions(target_dim), in ReshardWithAllToAll()
    1543 auto padded_hlo = PadToShape(hlo_, padded_shape, state_.b); in ReshardWithAllToAll()
|
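Note: in ReshardWithAllToAll above, the target dimension is rounded up to a multiple of the (elided) RoundUpTo argument, presumably the number of shards taking part in the all-to-all, and the tensor is then padded to that size with PadToShape so every shard receives an equal slice. The round-up itself is just ceiling division (sketch):

def round_up_to(value, multiple):
    return -(-value // multiple) * multiple

print(round_up_to(10, 4))  # -> 12: a size-10 dimension padded so 4 shards divide it evenly
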
D | spmd_partitioner_util.cc |
    529 auto padded_shape = result->shape(); in PadFromPartialReplicateShape() local
    534 padded_shape.set_dimensions(dim, result->shape().dimensions(dim) + in PadFromPartialReplicateShape()
    539 HloInstruction::CreatePad(padded_shape, result, zero, pad_config)); in PadFromPartialReplicateShape()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | tree_reduction_rewriter.cc |
    148 Shape padded_shape = in RewriteReduction() local
    150 VLOG(3) << "Generated padded shape: " << padded_shape.ToString(); in RewriteReduction()
    152 HloInstruction::CreatePad(padded_shape, in, in RewriteReduction()
|
/external/ComputeLibrary/src/cpu/kernels/ |
D | CpuCopyKernel.cpp |
    70 … const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(src_shape, padding); in validate_and_configure_window_with_padding() local
    71 auto_init_if_empty(*dst, src->clone()->set_tensor_shape(padded_shape)); in validate_and_configure_window_with_padding()
|
/external/ComputeLibrary/src/core/CL/kernels/ |
D | CLPadLayerKernel.cpp |
    57 …TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), pad… in validate_arguments() local
    60 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape); in validate_arguments()
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/ |
D | lower_tf.cc |
    907 auto padded_shape = llvm::to_vector<4>(input_shape); in matchAndRewrite() local
    916 padded_shape[i + 1] = (paddings_sum + input_shape[i + 1]); in matchAndRewrite()
    921 padded_shape[i + 1] = ShapedType::kDynamicSize; in matchAndRewrite()
    926 auto padded_type = RankedTensorType::get(padded_shape, element_type); in matchAndRewrite()
    976 auto padded_shape_i = padded_shape[1 + i]; in matchAndRewrite()
    988 SmallVector<int64_t, 6> reshaped_shape_ints{padded_shape[0]}; in matchAndRewrite()
    998 reshaped_shape_ints.push_back(padded_shape[i]); in matchAndRewrite()
|
/external/ComputeLibrary/arm_compute/core/utils/misc/ |
D | ShapeCalculator.h |
    1237 TensorShape padded_shape = input_shape; in compute_padded_shape() local
    1241 … const uint32_t shape_on_index = (padded_shape.num_dimensions() <= dim) ? 1 : input_shape[dim]; in compute_padded_shape()
    1242 padded_shape.set(dim, padding_pair.first + shape_on_index + padding_pair.second); in compute_padded_shape()
    1244 return padded_shape; in compute_padded_shape()
|
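Note: compute_padded_shape above is the helper the NEON/CL pad and copy kernels in the earlier entries validate against: each padded dimension becomes pad_before + dim + pad_after, and a padding entry past the input's rank treats the missing dimension as size 1. Python sketch of the same computation (not the Arm Compute Library code):

def compute_padded_shape(input_shape, paddings):
    padded = list(input_shape)
    for dim, (pad_before, pad_after) in enumerate(paddings):
        size = input_shape[dim] if dim < len(input_shape) else 1
        value = pad_before + size + pad_after
        if dim < len(padded):
            padded[dim] = value
        else:
            padded.append(value)
    return padded

print(compute_padded_shape([224, 224, 3], [(1, 1), (1, 1)]))  # -> [226, 226, 3]
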
/external/tensorflow/tensorflow/compiler/mlir/tosa/transforms/ |
D | legalize_common.cc |
    781 SmallVector<int64_t> padded_shape(input_rank); in convertSpaceToBatchNDOp() local
    789 padded_shape[0] = input_shape[0]; in convertSpaceToBatchNDOp()
    807 padded_shape[i + 1] = input_shape[i + 1]; in convertSpaceToBatchNDOp()
    808 if (!ShapedType::isDynamic(padded_shape[i + 1])) { in convertSpaceToBatchNDOp()
    811 padded_shape[i + 1] += lo_pad + hi_pad; in convertSpaceToBatchNDOp()
    819 padded_shape[i + block_rank + 1] = input_shape[i + block_rank + 1]; in convertSpaceToBatchNDOp()
    833 RankedTensorType::get(padded_shape, result_type.getElementType()), in convertSpaceToBatchNDOp()
    850 a2_shape[1 + i * 2 + 0] = padded_shape[1 + i]; in convertSpaceToBatchNDOp()
    920 a4_reshape_shape[i + 1] = padded_shape[i + 1]; in convertSpaceToBatchNDOp()
|
/external/tensorflow/tensorflow/python/data/ops/ |
D | dataset_ops.py |
    5186 def _is_padded_shape_compatible_with(padded_shape, input_component_shape): argument
    5198 if padded_shape.dims is None or input_component_shape.dims is None:
    5200 if len(padded_shape.dims) != len(input_component_shape.dims):
    5203 padded_shape.dims, input_component_shape.dims):
    5210 def _padded_shape_to_tensor(padded_shape, input_component_shape): argument
    5229 padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
    5239 ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
    5340 for input_component_shape, padded_shape in zip(
    5343 _padded_shape_to_tensor(padded_shape, input_component_shape))
|
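Note: _is_padded_shape_compatible_with above encodes the rule padded_batch uses to accept a user-supplied padded shape: if either rank is unknown the shapes are treated as compatible, the ranks must otherwise match, and each known padded dimension must be at least as large as the element's dimension. A simplified sketch over plain lists, with None standing in for an unknown dimension (not the tf.data source):

def is_padded_shape_compatible_with(padded_shape, input_shape):
    if padded_shape is None or input_shape is None:
        return True
    if len(padded_shape) != len(input_shape):
        return False
    return all(p is None or i is None or p >= i
               for p, i in zip(padded_shape, input_shape))

print(is_padded_shape_compatible_with([None, 10], [3, 8]))  # True
print(is_padded_shape_compatible_with([5], [3, 8]))         # False: rank mismatch
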
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | dynamic_padder.cc |
    905 Shape padded_shape = input->shape(); in RewriteInputWithDynamicPadding() local
    941 padded_shape.mutable_dimensions()[shape_dim] = in RewriteInputWithDynamicPadding()
    943 window_util::DilatedBound(padded_shape.dimensions(shape_dim), in RewriteInputWithDynamicPadding()
    956 padded_shape, pad, start_indices, padded_shape.dimensions())); in RewriteInputWithDynamicPadding()
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu.py |
    1098 padded_shape = padded_shapes[idx]
    1101 if any(need_padding[idx]) and padded_shape is not None:
    1115 for i, s in enumerate(padded_shape.dims):
|
/external/tensorflow/tensorflow/compiler/xla/stream_executor/tpu/ |
D | tpu_executor_c_api.h | 390 XLA_Shape* padded_shape,
|
/external/tensorflow/tensorflow/compiler/mlir/tosa/g3doc/ |
D | legalization.md |
    740 // [batch + padded_shape[1] / block_shape[0], block_shape[0], ...
    741 // padded_shape[M] / block_shape[M-1], block_shape[M-1]] +
    760 // [padded_shape[1] / block_shape[0],
    762 // [padded_shape[M] / block_shape[M-1]] +
    782 // [ padded_shape[1] / block_shape[0],
    784 // padded_shape[M] / block_shape[M-1]] +
|
/external/tensorflow/tensorflow/python/ops/ |
D | image_ops_impl.py |
    1140 padded_shape = [
    1144 padded.set_shape(padded_shape)
|