/external/tensorflow/tensorflow/python/ops/ragged/
D | ragged_tensor_shape_test.py |
     97: dict(dim_sizes=[], rank=0, expected_dim_sizes=[]),
     98: dict(dim_sizes=[], rank=3, expected_dim_sizes=[1, 1, 1]),
     99: dict(dim_sizes=[3], rank=1, expected_dim_sizes=[3]),
    100: dict(dim_sizes=[3], rank=3, expected_dim_sizes=[1, 1, 3]),
    101: dict(dim_sizes=[2, 3], rank=3, expected_dim_sizes=[1, 2, 3]),
    102: dict(dim_sizes=[3, [3, 2, 4]], rank=2, expected_dim_sizes=[3, [3, 2, 4]]),
    104: dim_sizes=[3, [3, 2, 4]],
    108: dim_sizes=[3, [3, 2, 4], 2, 3],
    112: def testBroadcastToRank(self, dim_sizes, rank, expected_dim_sizes):  [argument]
    113: shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes)
    [all …]

D | ragged_tensor_shape.py |
    132: def from_dim_sizes(dim_sizes):  [argument]
    146: [dim_sizes]):
    147: dim_sizes = tuple(
    149: size, dtype=dtypes.int64, name='dim_sizes') for size in dim_sizes)
    152: for dim, dim_size in enumerate(dim_sizes):
    157: return RaggedTensorDynamicShape(dim_sizes[:inner_split],
    158: dim_sizes[inner_split:])

/external/tensorflow/tensorflow/core/util/ |
D | tensor_format.h |
    457: return GetTensorDim(gtl::ArraySlice<int64>(tensor_shape.dim_sizes()),  [in GetTensorDim()]
    466: return GetFilterDim(gtl::ArraySlice<int64>(tensor_shape.dim_sizes()),  [in GetFilterDim()]
    512: gtl::InlinedVector<int64, 6> dim_sizes(dims);  [in ShapeFromFormat()]
    513: dim_sizes[GetTensorBatchDimIndex(dims, format)] = N;  [in ShapeFromFormat()]
    521: dim_sizes[GetTensorInnerWidthDimIndex(dims, format)] = 4;  [in ShapeFromFormat()]
    524: dim_sizes[GetTensorSpatialDimIndex(dims, format, dim)] = dim_size;  [in ShapeFromFormat()]
    532: dim_sizes[GetTensorInnerFeatureDimIndex(dims, format)] = 4;  [in ShapeFromFormat()]
    534: dim_sizes[feature_index] = C;  [in ShapeFromFormat()]
    535: return TensorShape(dim_sizes);  [in ShapeFromFormat()]
    546: gtl::InlinedVector<int64, 6> dim_sizes(dims);  [in ShapeFromFilterTensorFormat()]
    [all …]

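For orientation, a minimal sketch of how the two utilities indexed above fit together (the shape values are invented for illustration): ShapeFromFormat() fills an InlinedVector of dim_sizes according to the requested layout, and GetTensorDim() reads a logical dimension back out of the format-dependent ordering.

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/util/tensor_format.h"

    namespace tf = tensorflow;

    void TensorFormatSketch() {
      // Build an NCHW shape: batch N=32, spatial dims {28, 28}, channels C=3.
      tf::TensorShape shape =
          tf::ShapeFromFormat(tf::FORMAT_NCHW, 32, {28, 28}, 3);
      // shape.dim_sizes() is now {32, 3, 28, 28}; GetTensorDim() hides the
      // format-dependent index arithmetic when reading a dimension back.
      tf::int64 batch = tf::GetTensorDim(shape, tf::FORMAT_NCHW, 'N');  // 32
      tf::int64 depth = tf::GetTensorDim(shape, tf::FORMAT_NCHW, 'C');  // 3
      (void)batch;
      (void)depth;
    }
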
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | const_op.cc |
     55: shape.dim_sizes()));  [in Compile()]
     63: shape.dim_sizes()));  [in Compile()]
     71: shape.dim_sizes()));  [in Compile()]
     82: shape.dim_sizes()));  [in Compile()]
     93: shape.dim_sizes()));  [in Compile()]
    101: shape.dim_sizes()));  [in Compile()]
    109: shape.dim_sizes()));  [in Compile()]

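The seven hits above are one repeated idiom: Compile() hands shape.dim_sizes() to an XLA builder call, once per dtype case. A hedged sketch of the idiom, not the actual const_op.cc body (the builder and shape names here are assumptions):

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    void ConstSketch(xla::XlaBuilder* b) {
      tensorflow::TensorShape shape({2, 3});
      // dim_sizes() converts implicitly to absl::Span<const int64>, the
      // dimension type the builder API expects; Broadcast tiles a scalar
      // constant out to the full TensorShape.
      xla::XlaOp splat =
          xla::Broadcast(xla::ConstantR0<float>(b, 0.5f), shape.dim_sizes());
      (void)splat;
    }
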
D | select_op.cc |
    68: const auto dim_sizes = then_shape.dim_sizes();  [in Compile(), local]
    69: absl::Span<const int64> bdims = dim_sizes;  [in Compile()]

D | tensor_list_ops.cc |
     94: list_shape.dim_sizes());  [in CreateZerosList()]
    181: ctx->SetOutput(0, xla::ConstantR1<int64>(b, shape.dim_sizes()));  [in Compile()]
    185: for (int64 s : shape.dim_sizes()) {  [in Compile()]
    227: auto slice_shape = shape.dim_sizes();  [in Compile()]
    283: absl::Span<const int64>(tensor_shape.dim_sizes()).subspan(1)),  [in Compile()]
    368: auto update = xla::Reshape(value, slice_shape.dim_sizes());  [in Compile()]
    411: auto update = xla::Reshape(value, slice_shape.dim_sizes());  [in Compile()]
    453: auto slice_shape = shape.dim_sizes();  [in Compile()]

D | split_op.cc |
    180: auto dim_sizes = input_shape.dim_sizes();  [in Compile(), local]
    181: std::vector<int64> limits(dim_sizes.begin(), dim_sizes.end());  [in Compile()]

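Lines 180-181 seed the limit indices for xla::Slice from the input's dim_sizes(). A sketch of the surrounding logic under stated assumptions (input, split_dim, and shard_size are illustrative names, not the file's locals):

    #include <vector>

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    xla::XlaOp SplitSketch(xla::XlaOp input,
                           const tensorflow::TensorShape& input_shape,
                           int split_dim, tensorflow::int64 shard_size) {
      auto dim_sizes = input_shape.dim_sizes();
      // Full-extent slice bounds: start at 0, stop at each dimension's size.
      std::vector<tensorflow::int64> starts(dim_sizes.size(), 0);
      std::vector<tensorflow::int64> limits(dim_sizes.begin(), dim_sizes.end());
      std::vector<tensorflow::int64> strides(dim_sizes.size(), 1);
      // Narrow just the split dimension to carve out one shard.
      limits[split_dim] = starts[split_dim] + shard_size;
      return xla::Slice(input, starts, limits, strides);
    }
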
D | diag_op.cc |
     88: auto dims = input_shape.dim_sizes();  [in Compile()]
    127: auto dims = input_shape.dim_sizes();  [in Compile()]
    172: auto dims = input_shape.dim_sizes();  [in Compile()]
    198: auto dims = input_shape.dim_sizes();  [in Compile()]

D | index_ops_cpu.cc |
     95: &b, xla::LiteralUtil::CreateR1<int64>(input_shape.dim_sizes())));  [in Compile()]
    100: &b, xla::LiteralUtil::CreateR1<int64>(output_shape.dim_sizes())));  [in Compile()]
    107: xla::S64, output_shape.dim_sizes());  [in Compile()]

D | relu_op.cc |
    77: xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());  [in Compile()]
    93: xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());  [in Compile()]
    95: XlaHelpers::IntegerLiteral(b, input_type(0), 6), shape.dim_sizes());  [in Compile()]

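These three hits are the whole trick behind the XLA Relu/Relu6 lowering: broadcast the scalar bounds to the input's dim_sizes(), then clamp elementwise. A sketch, with builder and input assumed; the real kernel uses XlaHelpers::Zero / IntegerLiteral to stay dtype-generic, whereas this pins float for brevity:

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    xla::XlaOp ReluSketch(xla::XlaBuilder* b, xla::XlaOp input,
                          const tensorflow::TensorShape& shape) {
      xla::XlaOp zeros =
          xla::Broadcast(xla::ConstantR0<float>(b, 0.0f), shape.dim_sizes());
      xla::XlaOp relu = xla::Max(input, zeros);  // Relu
      xla::XlaOp sixes =
          xla::Broadcast(xla::ConstantR0<float>(b, 6.0f), shape.dim_sizes());
      return xla::Min(relu, sixes);              // Relu6
    }
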
D | shape_op.cc |
    136: auto existing_dims = input_shape.dim_sizes();  [in Compile()]
    170: auto existing_dims = input_shape.dim_sizes();  [in Compile()]
    229: ctx->SetOutput(0, xla::Broadcast(zero, input_shape.dim_sizes()));  [in Compile()]
    243: ctx->SetOutput(0, xla::Broadcast(one, input_shape.dim_sizes()));  [in Compile()]

D | clip_by_value_op.cc |
    48: min = xla::Broadcast(min, shape.dim_sizes());  [in Compile()]
    52: max = xla::Broadcast(max, shape.dim_sizes());  [in Compile()]

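Same broadcasting idiom: scalar clip bounds are expanded to the operand's dim_sizes() so that all three arguments of xla::Clamp agree in shape. A sketch with the surrounding names assumed:

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    xla::XlaOp ClipSketch(xla::XlaOp input, xla::XlaOp min, xla::XlaOp max,
                          const tensorflow::TensorShape& shape) {
      // min/max arrive as scalars; tile them out to the operand's shape.
      min = xla::Broadcast(min, shape.dim_sizes());
      max = xla::Broadcast(max, shape.dim_sizes());
      return xla::Clamp(min, input, max);  // argument order: (min, operand, max)
    }
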
D | tensor_array_ops.cc |
    166: value = xla::Broadcast(zero, ta_shape.dim_sizes());  [in Compile()]
    221: auto update = xla::Reshape(value, slice_shape.dim_sizes());  [in Compile()]
    225: written = DynamicAddSlice(b, ta, update, slice_shape.dim_sizes(),  [in Compile()]
    270: auto slice_shape = ta_shape.dim_sizes();  [in Compile()]
    405: auto slice_dims = value_shape.dim_sizes();  [in Compile()]
    409: auto value_ends = value_shape.dim_sizes();  [in Compile()]
    461: auto ta_dims = ta_shape.dim_sizes();  [in Compile()]
    533: const xla::XlaOp reshape = xla::Reshape(value, ta_shape.dim_sizes());  [in Compile()]

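The write path above (lines 221/225) reshapes the incoming element into a single-index slice of the array and updates it in place; DynamicAddSlice is a helper local to the real file. A sketch of the same shape bookkeeping using plain xla::DynamicUpdateSlice, with all names assumed:

    #include <vector>

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    xla::XlaOp WriteSketch(xla::XlaBuilder* b, xla::XlaOp ta, xla::XlaOp value,
                           xla::XlaOp index,  // scalar S32 write position
                           const tensorflow::TensorShape& ta_shape) {
      // Slice shape = TensorArray shape with a leading extent of 1.
      auto slice_dims = ta_shape.dim_sizes();
      slice_dims[0] = 1;
      xla::XlaOp update = xla::Reshape(value, slice_dims);
      // Start indices {index, 0, 0, ...}.
      std::vector<xla::XlaOp> starts(slice_dims.size(),
                                     xla::ConstantR0<tensorflow::int32>(b, 0));
      starts[0] = index;
      return xla::DynamicUpdateSlice(ta, update, starts);
    }
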
D | aggregate_ops.cc |
    51: ctx, sum_shape.dim_sizes() == operand_shape.dim_sizes(),  [in Compile()]

D | resampler_ops.cc |
     59: auto warp_dims = warp_shape.dim_sizes();  [in BilinearWeights()]
    256: auto warp_dims = warp_shape.dim_sizes();  [in CalculateGradData()]
    363: auto warp_dims = warp_shape.dim_sizes();  [in CalculateGradWarp()]
    499: for (int size : warp_shape.dim_sizes()) {  [in Compile()]
    579: auto warp_dims = warp_shape.dim_sizes();  [in Compile()]
    620: for (int size : warp_shape.dim_sizes()) {  [in Compile()]
    665: auto warp_dims = warp_shape.dim_sizes();  [in Compile()]

D | cwise_ops.cc |
    81: Computation(ctx, lhs_handle, lhs_shape.dim_sizes(), rhs_handle,  [in Compile()]
    82: rhs_shape.dim_sizes(), bcast, extend_dimension);  [in Compile()]

D | matrix_set_diag_op.cc |
    69: indicator = xla::Broadcast(indicator, batch_shape.dim_sizes());  [in Compile()]
    78: diag = xla::Add(diag, xla::Broadcast(zero, input_shape.dim_sizes()),  [in Compile()]

D | matrix_band_part_op.cc |
    82: indicator = xla::Broadcast(indicator, batch_shape.dim_sizes());  [in Compile()]
    86: indicator, input, xla::Broadcast(zero_input, input_shape.dim_sizes()));  [in Compile()]

/external/tensorflow/tensorflow/core/framework/ |
D | tensor_shape.cc |
    136: TensorShapeBase<Shape>::TensorShapeBase(gtl::ArraySlice<int64> dim_sizes) {  [in TensorShapeBase(), argument]
    139: InitDims(dim_sizes);  [in TensorShapeBase()]
    159: void TensorShapeBase<Shape>::InitDims(gtl::ArraySlice<int64> dim_sizes) {  [in InitDims(), argument]
    168: for (auto s : dim_sizes) {  [in InitDims()]
    178: switch (dim_sizes.size()) {  [in InitDims()]
    181: const int64 size = dim_sizes[0];  [in InitDims()]
    188: const int64 size0 = dim_sizes[0];  [in InitDims()]
    189: const int64 size1 = dim_sizes[1];  [in InitDims()]
    197: const int64 size0 = dim_sizes[0];  [in InitDims()]
    198: const int64 size1 = dim_sizes[1];  [in InitDims()]
    [all …]

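Both constructors indexed above funnel into InitDims(), which validates the sizes and, judging by the switch on dim_sizes.size(), special-cases small ranks. A minimal usage sketch of the public surface:

    #include <vector>

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/platform/logging.h"

    void ShapeCtorSketch() {
      std::vector<tensorflow::int64> dims = {2, 3, 5};
      // The ArraySlice and initializer_list forms build identical shapes.
      tensorflow::gtl::ArraySlice<tensorflow::int64> slice(dims);
      tensorflow::TensorShape a(slice);
      tensorflow::TensorShape b({2, 3, 5});
      CHECK(a == b);
      CHECK_EQ(a.num_elements(), 30);
    }
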
D | tensor_shape_test.cc |
    220: explicit TensorShapeOld(gtl::ArraySlice<int64> dim_sizes);
    221: TensorShapeOld(std::initializer_list<int64> dim_sizes)  [in TensorShapeOld(), argument]
    222: : TensorShapeOld(gtl::ArraySlice<int64>(dim_sizes)) {}  [in TensorShapeOld()]
    276: gtl::ArraySlice<int64> dim_sizes() const { return dim_sizes_; }  [in dim_sizes(), in tensorflow::__anone850e9110111::TensorShapeOld]
    390: TensorShapeOld::TensorShapeOld(gtl::ArraySlice<int64> dim_sizes) {  [in TensorShapeOld(), argument]
    391: dim_sizes_.reserve(dim_sizes.size());  [in TensorShapeOld()]
    393: for (auto s : dim_sizes) {  [in TensorShapeOld()]

D | tensor.h |
    750: CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));  [in reinterpret_last_dimension()]
    753: dims[d] = shape_.dim_sizes()[d];  [in reinterpret_last_dimension()]
    766: CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));  [in reinterpret_last_dimension()]
    769: dims[d] = shape_.dim_sizes()[d];  [in reinterpret_last_dimension()]
    880: return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));  [in flat_inner_dims()]
    885: return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));  [in flat_outer_dims()]
    891: ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);  [in flat_inner_outer_dims()]
    897: return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));  [in flat_inner_dims()]
    902: return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));  [in flat_outer_dims()]
    909: ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);  [in flat_inner_outer_dims()]

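flat_inner_dims() and flat_outer_dims() above both delegate to ComputeFlat*Dims(shape_.dim_sizes(), NDIMS): collapse everything except the last (respectively first) NDIMS-1 dimensions. A sketch of the observable behavior:

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/logging.h"

    void FlatDimsSketch() {
      tensorflow::Tensor t(tensorflow::DT_FLOAT,
                           tensorflow::TensorShape({4, 3, 5}));
      // Keep the last dim, fold the rest forward: {4*3, 5} = {12, 5}.
      auto inner = t.flat_inner_dims<float, 2>();
      // Keep the first dim, fold the rest backward: {4, 3*5} = {4, 15}.
      auto outer = t.flat_outer_dims<float, 2>();
      CHECK_EQ(inner.dimension(0), 12);
      CHECK_EQ(outer.dimension(1), 15);
    }
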
D | tensor_shape.h |
    170: explicit TensorShapeBase(gtl::ArraySlice<int64> dim_sizes);
    171: TensorShapeBase(std::initializer_list<int64> dim_sizes)  [in TensorShapeBase(), argument]
    172: : TensorShapeBase(gtl::ArraySlice<int64>(dim_sizes)) {}  [in TensorShapeBase()]
    244: gtl::InlinedVector<int64, 4> dim_sizes() const;
    259: void InitDims(gtl::ArraySlice<int64> dim_sizes);

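Line 244 is the accessor the rest of this index revolves around: dim_sizes() materializes the shape as a gtl::InlinedVector<int64, 4>, which converts implicitly to gtl::ArraySlice / absl::Span wherever a flat run of dimensions is needed. A round-trip sketch:

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/platform/logging.h"

    void DimSizesSketch() {
      tensorflow::TensorShape shape({2, 3, 5});
      tensorflow::gtl::InlinedVector<tensorflow::int64, 4> sizes =
          shape.dim_sizes();  // {2, 3, 5}
      // Feeding the vector back through the ArraySlice constructor
      // reproduces the shape.
      tensorflow::gtl::ArraySlice<tensorflow::int64> slice(sizes);
      tensorflow::TensorShape again(slice);
      CHECK(shape == again);
    }
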
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | shape_partition_test.cc |
    169: std::vector<int64> dim_sizes(num_outer_dims_to_partition);  [in TEST_F(), local]
    175: dim_sizes[i] = shape.dimensions(dimension);  [in TEST_F()]
    176: total_dim_size *= dim_sizes[i];  [in TEST_F()]
    178: const int64 dim_partition_count = 1 + Rand() % dim_sizes[i];  [in TEST_F()]
    202: EXPECT_EQ(expected_index, dim_sizes[i]);  [in TEST_F()]

/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_resource.cc |
    132: xla::Broadcast(XlaHelpers::Zero(builder, type_), shape_.dim_sizes());  [in SetZeroValue()]
    140: ta_shape.dim_sizes());  [in SetZeroValue()]
    149: ta_shape.dim_sizes()),  [in SetZeroValue()]
    173: xla::Broadcast(XlaHelpers::Zero(builder, type_), ta_shape.dim_sizes());  [in GetOrCreateTensorArrayGradient()]

D | xla_helpers.cc |
    103: xla::Broadcast(on_value, output_shape.dim_sizes()),  [in OneHot()]
    104: xla::Broadcast(off_value, output_shape.dim_sizes()));  [in OneHot()]

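Lines 103-104 are the tail of XlaHelpers::OneHot(): on/off scalars broadcast to the full output shape, selected by comparing the indices against an iota along the depth axis. A self-contained sketch of that construction; everything besides the xla:: calls is an assumption for illustration:

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    xla::XlaOp OneHotSketch(xla::XlaBuilder* b) {
      tensorflow::TensorShape output_shape({4, 8});  // 4 indices, depth 8
      xla::XlaOp indices = xla::ConstantR1<tensorflow::int64>(b, {1, 3, 0, 7});
      // iota[i, j] = j along the depth axis, shaped like the output.
      xla::XlaOp iota = xla::Iota(
          b, xla::ShapeUtil::MakeShape(xla::S64, output_shape.dim_sizes()),
          /*iota_dimension=*/1);
      // Lift the rank-1 indices into the output shape, then compare.
      xla::XlaOp match = xla::Eq(
          xla::BroadcastInDim(indices, output_shape.dim_sizes(), {0}), iota);
      return xla::Select(match,
                         xla::Broadcast(xla::ConstantR0<float>(b, 1.0f),
                                        output_shape.dim_sizes()),
                         xla::Broadcast(xla::ConstantR0<float>(b, 0.0f),
                                        output_shape.dim_sizes()));
    }
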