/external/tensorflow/tensorflow/lite/delegates/gpu/metal/kernels/ |
D | reshape.cc |
    135  return BHWC{batch, attr.new_shape.h, attr.new_shape.w,  in Reshape()
    136  attr.new_shape.c};  in Reshape()
    161  const uint3 grid = uint3(attr.new_shape.w, attr.new_shape.h,  in Reshape()
    162  IntegralDivideRoundUp(attr.new_shape.c, 4));  in Reshape()
    189  return BHWC{batch, attr.new_shape.h, attr.new_shape.w,  in Reshapex4()
    190  attr.new_shape.c};  in Reshapex4()
    220  const uint3 grid = uint3(attr.new_shape.w, attr.new_shape.h,  in Reshapex4()
    221  IntegralDivideRoundUp(attr.new_shape.c, 4));  in Reshapex4()
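The grid above spans the output width, height, and the channels taken in groups of four. A rough Python sketch of the same arithmetic, for intuition only (the function and variable names are illustrative, not the Metal backend's API):

    # Illustrative only: mirrors the uint3 grid arithmetic shown above.
    def integral_divide_round_up(n, divisor):
        # Ceiling division, as IntegralDivideRoundUp does in the snippet.
        return (n + divisor - 1) // divisor

    def dispatch_grid(new_shape_w, new_shape_h, new_shape_c):
        # The z dimension covers the channels in groups of 4.
        return (new_shape_w, new_shape_h, integral_divide_round_up(new_shape_c, 4))

    print(dispatch_grid(8, 4, 10))  # (8, 4, 3): 10 channels round up to 3 groups of 4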
|
D | reshape_test.mm |
    60   attr.new_shape = output.shape;
    82   attr.new_shape = output.shape;
    104  attr.new_shape = output.shape;
    126  attr.new_shape = output.shape;
|
/external/tensorflow/tensorflow/lite/testing/op_tests/ |
D | shape.py |
    50  new_shape = tf.compat.v1.placeholder(
    52  reshaped = tf.reshape(input_value, shape=new_shape)
    54  return [input_value, new_shape], [out]
    59  new_shape = np.array(parameters["new_shape"])
    60  return [input_value, new_shape], sess.run(
    61  outputs, feed_dict=dict(zip(inputs, [input_value, new_shape])))
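For reference, a condensed, hedged sketch of the pattern those lines implement: the target shape is fed in as a tensor at run time rather than baked into the graph (TF1-style placeholders, simplified and not the actual test harness):

    import numpy as np
    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    input_value = tf.compat.v1.placeholder(tf.float32, shape=[2, 3])
    new_shape = tf.compat.v1.placeholder(tf.int32, shape=[2])  # shape supplied when run
    reshaped = tf.reshape(input_value, shape=new_shape)

    with tf.compat.v1.Session() as sess:
        out = sess.run(reshaped,
                       feed_dict={input_value: np.ones((2, 3), np.float32),
                                  new_shape: np.array([3, 2], np.int32)})
        print(out.shape)  # (3, 2)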
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_layout_assignment.cc |
    81  Shape new_shape(old_shape);  in RowMajorShape() local
    82  std::vector<int64> dimension_order(new_shape.dimensions_size());  in RowMajorShape()
    84  *new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);  in RowMajorShape()
    85  return new_shape;  in RowMajorShape()
    89  Shape new_shape(old_shape);  in ColMajorShape() local
    90  std::vector<int64> dimension_order(new_shape.dimensions_size());  in ColMajorShape()
    92  *new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);  in ColMajorShape()
    93  return new_shape;  in ColMajorShape()
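RowMajorShape and ColMajorShape above copy a shape and attach a minor-to-major layout to the copy. For intuition only, the row-major versus column-major distinction expressed in NumPy terms (NumPy strides, not XLA layouts):

    import numpy as np

    a = np.arange(6, dtype=np.int32).reshape(2, 3)  # row-major (C order) by default
    f = np.asfortranarray(a)                        # same values, column-major (Fortran order)
    print(a.strides, f.strides)                     # (12, 4) vs (4, 8) for 4-byte elements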
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_SparseReshape.pbtxt |
    17  name: "new_shape"
    33  SparseTensor. This is the same as `new_shape` but with any -1 dimensions
    40  tensor. The `input_indices` are recomputed based on the requested `new_shape`.
    42  If one component of `new_shape` is the special value -1, the size of that
    44  most one component of `new_shape` can be -1. The number of dense elements
    45  implied by `new_shape` must be the same as the number of dense elements
    50  If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
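To make the -1 rule above concrete, here is a small example through the public `tf.sparse.reshape` wrapper, which is backed by this op; the numbers are illustrative:

    import tensorflow as tf

    sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                                values=[1, 2],
                                dense_shape=[3, 4])    # 12 dense elements
    reshaped = tf.sparse.reshape(sp, [2, -1])          # the -1 is inferred as 6
    print(reshaped.dense_shape.numpy())                # [2 6]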
|
/external/tensorflow/tensorflow/lite/ |
D | string_util_test.cc |
    57   auto new_shape = TfLiteIntArrayCreate(2);  in TEST() local
    58   new_shape->data[0] = 2;  in TEST()
    59   new_shape->data[1] = 1;  in TEST()
    60   buf0.WriteToTensor(t0, new_shape);  in TEST()
    147  auto new_shape = TfLiteIntArrayCreate(2);  in TEST() local
    148  new_shape->data[0] = 1;  in TEST()
    149  new_shape->data[1] = 2;  in TEST()
    151  buf.WriteToTensor(t0, new_shape);  in TEST()
|
D | string_util.cc |
    99   TfLiteIntArray* new_shape) {  in WriteToTensor() argument
    103  if (new_shape == nullptr) {  in WriteToTensor()
    104  new_shape = TfLiteIntArrayCopy(tensor->dims);  in WriteToTensor()
    108  TfLiteTensorReset(tensor->type, tensor->name, new_shape, tensor->params,  in WriteToTensor()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | shape_ops.h |
    157  std::vector<int64> new_shape(existing_dims_size);  in Compute()
    158  for (size_t i = 0; i < new_shape.size(); ++i) {  in Compute()
    159  new_shape[i] = existing_dims[i];  in Compute()
    170  new_shape.emplace(new_shape.begin() + dim, 1);  in Compute()
    171  const TensorShape output_shape(new_shape);  in Compute()
    202  std::vector<int64> new_shape;  in Compute() local
    233  new_shape.push_back(existing_dim);  in Compute()
    238  new_shape.push_back(existing_dim);  in Compute()
    243  const TensorShape output_shape(new_shape);  in Compute()
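Those two Compute() bodies build `new_shape` for ExpandDims (insert a size-1 dimension at `dim`) and Squeeze (keep only the dimensions that are not squeezed away). The same behaviour seen from the Python API, for reference:

    import tensorflow as tf

    x = tf.zeros([2, 3])
    print(tf.expand_dims(x, axis=1).shape)            # (2, 1, 3): a 1 inserted at dim 1
    print(tf.squeeze(tf.zeros([2, 1, 3, 1])).shape)   # (2, 3): size-1 dims dropped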
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | cudnn_pad_for_convolutions.cc |
    46   const Shape& new_shape) {  in PadInstruction() argument
    57   if (shape.dimensions(dim) == new_shape.dimensions(dim)) {  in PadInstruction()
    60   CHECK_GT(new_shape.dimensions(dim), shape.dimensions(dim));  in PadInstruction()
    62   new_shape.dimensions(dim) - shape.dimensions(dim));  in PadInstruction()
    70   HloInstruction::CreatePad(new_shape, instr, zero, pad_config));  in PadInstruction()
    245  const Shape& new_shape) {  in TryResolvePaddedShapesForTensorCore() argument
    247  int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);  in TryResolvePaddedShapesForTensorCore()
    255  << ShapeUtil::HumanString(new_shape) << ", a size increase of "  in TryResolvePaddedShapesForTensorCore()
    354  const Shape& new_shape) {  in TryResolvePaddedShapesForIntegerConvolution() argument
    356  int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);  in TryResolvePaddedShapesForIntegerConvolution()
    [all …]
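PadInstruction above pads each dimension up to the requested `new_shape` (edge_padding_high = new - old), and the TryResolve… helpers compare old and new byte sizes before accepting the padding. A rough sketch of that arithmetic; the multiple of 8 and the example numbers are assumptions for illustration, not the pass's actual constants:

    def round_up(n, multiple):
        return ((n + multiple - 1) // multiple) * multiple

    def pad_amount(old_dim, multiple=8):
        # edge_padding_high = new_dim - old_dim, as in PadInstruction.
        new_dim = round_up(old_dim, multiple)
        return new_dim - old_dim

    print(pad_amount(30))  # 2: 30 channels are padded up to 32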
|
D | variadic_op_splitter.cc |
    58  Shape new_shape = concat->shape();  in SplitConcatenate() local
    66  new_shape.set_dimensions(concat->concatenate_dimension(),  in SplitConcatenate()
    69  new_shape, operands_span.subspan(offset, kMaxParameters)));  in SplitConcatenate()
|
D | gpu_conv_rewriter.cc |
    306  Shape new_shape = lhs_transpose->shape();  in MatchBackwardFilter() local
    307  new_shape.DeleteDimension(input_feature_dimension);  in MatchBackwardFilter()
    308  new_shape.set_dimensions(input_feature_dimension,  in MatchBackwardFilter()
    311  HloInstruction::CreateReshape(new_shape, lhs_transpose));  in MatchBackwardFilter()
    608  Shape new_shape = rhs->shape();  in MatchBackwardInput() local
    609  new_shape.DeleteDimension(output_feature_dimension);  in MatchBackwardInput()
    610  new_shape.set_dimensions(output_feature_dimension,  in MatchBackwardInput()
    612  rhs = c->AddInstruction(HloInstruction::CreateReshape(new_shape, rhs));  in MatchBackwardInput()
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | shape_op.cc |
    176  std::vector<int64> new_shape(existing_dims_size);  in Compile() local
    177  for (size_t i = 0; i < new_shape.size(); ++i) {  in Compile()
    178  new_shape[i] = existing_dims[i];  in Compile()
    189  new_shape.emplace(new_shape.begin() + dim, 1);  in Compile()
    191  ctx->SetOutput(0, xla::Reshape(ctx->Input("input"), new_shape));  in Compile()
    209  std::vector<int64> new_shape;  in Compile() local
    239  new_shape.push_back(existing_dim);  in Compile()
    244  new_shape.push_back(existing_dim);  in Compile()
    249  ctx->SetOutput(0, xla::Reshape(ctx->Input(0), new_shape));  in Compile()
|
D | dynamic_stitch_op.cc |
    154  TensorShape new_shape;  in Compile() local
    156  new_shape.AddDim(indices[input_num].shape().dimensions(0));  in Compile()
    159  new_shape.AddDim(data0_shape.dim_size(d));  in Compile()
    163  if (new_shape == data_shapes[input_num]) {  in Compile()
    166  input[input_num] = xla::Reshape(handle, new_shape.dim_sizes());  in Compile()
|
/external/tensorflow/tensorflow/lite/delegates/gpu/common/ |
D | operations.cc |
    509  BHWC new_shape = input[0];  in CalculateOutputShape() local
    513  if (input[i].h != new_shape.h || input[i].w != new_shape.w) {  in CalculateOutputShape()
    518  new_shape.c += input[i].c;  in CalculateOutputShape()
    523  if (input[i].w != new_shape.w || input[i].c != new_shape.c) {  in CalculateOutputShape()
    528  new_shape.h += input[i].h;  in CalculateOutputShape()
    533  if (input[i].h != new_shape.h || input[i].c != new_shape.c) {  in CalculateOutputShape()
    538  new_shape.w += input[i].w;  in CalculateOutputShape()
    545  *output_shape = new_shape;  in CalculateOutputShape()
    614  return BHWC(input.b, attr.new_shape.h, attr.new_shape.w, input.c);  in CalculateOutputShape()
    618  return BHWDC(input.b, attr.new_shape.h, attr.new_shape.w, attr.new_shape.d,  in CalculateOutputShape()
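The concatenation rule above requires the non-concatenated axes to match and sums the sizes along the concatenation axis. The same rule seen through the Python API, purely for illustration:

    import tensorflow as tf

    a = tf.zeros([1, 4, 4, 3])   # BHWC
    b = tf.zeros([1, 4, 4, 5])
    print(tf.concat([a, b], axis=-1).shape)  # (1, 4, 4, 8): channels add, B/H/W must match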
|
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/ |
D | reshape_test.cc |
    45   attr.new_shape = output.shape;
    67   attr.new_shape = output.shape;
    89   attr.new_shape = output.shape;
    110  attr.new_shape = output.shape;  in TEST()
|
D | resize_test.cc |
    44   attr.new_shape = HW(2, 2);  in TEST()
    69   attr.new_shape = HW(1, 4);  in TEST()
    93   attr.new_shape = HW(4, 4);  in TEST()
    119  attr.new_shape = HW(2, 4);  in TEST()
|
/external/tensorflow/tensorflow/python/ops/parallel_for/ |
D | gradients.py |
    74   new_shape = array_ops.concat(
    76   out = array_ops.reshape(out, new_shape)
    146  new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
    147  return array_ops.reshape(output, new_shape)
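Those lines rebuild the final jacobian shape by concatenating the output shape with the trailing input dimensions and reshaping the flat result. A minimal standalone sketch of that pattern, with illustrative shapes (not the actual pfor internals):

    import tensorflow as tf

    output_shape = tf.constant([2, 3])       # shape of the (flattened) output
    inp_shape = tf.constant([4, 5])          # input shape; inp_shape[1:] == [5]
    flat = tf.zeros([6, 5])                  # prod(output_shape) x trailing input dims

    new_shape = tf.concat([output_shape, inp_shape[1:]], axis=0)
    print(tf.reshape(flat, new_shape).shape)  # (2, 3, 5)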
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | depthwise_convolution_converter.cc |
    163  Shape new_shape = lhs->shape();  in HandleBackwardFilterBatchGroupConvolution() local
    164  new_shape.DeleteDimension(input_feature_dimension);  in HandleBackwardFilterBatchGroupConvolution()
    165  new_shape.set_dimensions(input_feature_dimension,  in HandleBackwardFilterBatchGroupConvolution()
    167  lhs = add(HloInstruction::CreateReshape(new_shape, lhs));  in HandleBackwardFilterBatchGroupConvolution()
|
D | reshape_mover.cc |
    142  const Shape new_shape =  in UpdateOperand() local
    150  HloInstruction::CreateReshape(new_shape, operand));  in UpdateOperand()
    157  new_shape, operand, inverse_permutation));  in UpdateOperand()
    164  operand->CloneWithNewOperands(new_shape, operand->operands()));  in UpdateOperand()
    174  operand->CloneWithNewOperands(new_shape, operand->operands()));  in UpdateOperand()
|
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/ |
D | resize_test.cc |
    41   attr.new_shape = HW(4, 4);  in TEST_F()
    72   attr.new_shape = HW(4, 4);  in TEST_F()
    103  attr.new_shape = HW(2, 4);  in TEST_F()
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | sparse_ops_test.py |
    359  new_shape = np.array([3, 6, 7], dtype=np.int64)
    360  sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    366  new_shape = np.array([3, 6, 7], dtype=np.int64)
    367  sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    379  new_shape = np.array([3, 6, 7], dtype=np.int64)
    380  sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    393  new_shape = np.array([3, 6, 7], dtype=np.int64)
    394  sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    430  new_shape = np.array([3, 7], dtype=np.int64)
    433  sparse_ops.sparse_reset_shape(sp_input, new_shape)
    [all …]
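For context, resetting a sparse shape only changes the declared dense shape; the new shape is expected to cover the bounding box of the existing indices, and omitting it tightens the shape to that bounding box. A hedged example through the public API, with illustrative values:

    import numpy as np
    import tensorflow as tf

    sp = tf.sparse.SparseTensor(indices=[[0, 0, 0], [1, 3, 4]],
                                values=[1.0, 2.0],
                                dense_shape=[2, 4, 5])
    bigger = tf.sparse.reset_shape(sp, np.array([3, 6, 7], dtype=np.int64))
    print(bigger.dense_shape.numpy())  # [3 6 7]; indices and values are unchanged
    tight = tf.sparse.reset_shape(sp)  # no new_shape: shrink to the tight bounding box
    print(tight.dense_shape.numpy())   # [2 4 5]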
|
/external/tensorflow/tensorflow/python/ops/ |
D | special_math_ops.py |
    648  new_shape = (
    651  t0 = _reshape_if_necessary(t0, new_shape)
    656  new_shape = (
    659  t1 = _reshape_if_necessary(t1, new_shape)
    684  def _reshape_if_necessary(tensor, new_shape):  argument
    687  new_shape = tuple(-1 if x is None else x for x in new_shape)
    689  if (len(new_shape) == len(cur_shape) and
    691  for d0, d1 in zip(cur_shape, new_shape))):
    694  return array_ops.reshape(tensor, new_shape)
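_reshape_if_necessary above skips a reshape when the requested shape already matches the tensor's shape, mapping None to -1 first. A standalone sketch of the same idea (a simplification, not the einsum implementation itself):

    import tensorflow as tf

    def reshape_if_necessary(tensor, new_shape):
        """Reshape only when the requested shape differs from the current one."""
        new_shape = tuple(-1 if d is None else d for d in new_shape)  # None -> -1
        cur_shape = tuple(tensor.shape.as_list())
        if len(new_shape) == len(cur_shape) and all(
            d0 == d1 or d1 == -1 for d0, d1 in zip(cur_shape, new_shape)):
            return tensor  # avoid inserting a no-op reshape
        return tf.reshape(tensor, new_shape)

    x = tf.zeros([2, 3])
    print(reshape_if_necessary(x, (2, 3)) is x)    # True: no reshape added
    print(reshape_if_necessary(x, (3, -1)).shape)  # (3, 2)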
|
D | random_grad.py |
    29  new_shape = array_ops.concat(
    32  return array_ops.reshape(x, new_shape)
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_util.cc |
    239  Shape new_shape = MakeShapeWithDescendingLayout(shape.element_type(), dims);  in MakeShapeWithDescendingLayoutAndSamePhysicalLayout() local
    242  new_shape.mutable_layout()->mutable_tiles()->assign(  in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
    244  new_shape.mutable_layout()->set_element_size_in_bits(  in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
    247  new_shape.set_dynamic_dimension(i, shape.is_dynamic_dimension(i));  in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
    249  return new_shape;  in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
    764  Shape new_shape = original;  in ChangeElementType() local
    765  new_shape.set_element_type(type);  in ChangeElementType()
    766  return new_shape;  in ChangeElementType()
    932  Shape new_shape = shape;  in PermuteDimensions() local
    933  new_shape.clear_dimensions();  in PermuteDimensions()
    [all …]
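Conceptually, ChangeElementType copies a shape and swaps its primitive type, and PermuteDimensions reorders the dimensions. A loose NumPy analogue for intuition only; XLA shapes also carry layouts and tiling, which NumPy does not model:

    import numpy as np

    x = np.zeros((2, 3, 4), dtype=np.float32)
    y = x.astype(np.float16)          # same dimensions, different element type
    z = np.transpose(x, (2, 0, 1))    # dimensions permuted to (4, 2, 3)
    print(y.shape, y.dtype, z.shape)  # (2, 3, 4) float16 (4, 2, 3)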
|
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/ |
D | prepare_tf.cc |
    446  SmallVector<int64_t, 4> new_shape;  in matchAndRewrite() local
    450  new_shape.emplace_back(1);  in matchAndRewrite()
    452  new_shape.emplace_back(original_input_shape[index++]);  in matchAndRewrite()
    457  const int dim_size = new_shape.size();  in matchAndRewrite()
    464  rewriter.getI32IntegerAttr(static_cast<int32_t>(new_shape[i]));  in matchAndRewrite()
    469  RankedTensorType::get(new_shape, original_input_type.getElementType());  in matchAndRewrite()
|