/external/tensorflow/tensorflow/python/ops/ragged/

D | ragged_tensor_shape_test.py
     90  dict(dim_sizes=[], rank=0, expected_dim_sizes=[]),
     91  dict(dim_sizes=[], rank=3, expected_dim_sizes=[1, 1, 1]),
     92  dict(dim_sizes=[3], rank=1, expected_dim_sizes=[3]),
     93  dict(dim_sizes=[3], rank=3, expected_dim_sizes=[1, 1, 3]),
     94  dict(dim_sizes=[2, 3], rank=3, expected_dim_sizes=[1, 2, 3]),
     95  dict(dim_sizes=[3, [3, 2, 4]], rank=2, expected_dim_sizes=[3, [3, 2, 4]]),
     97  dim_sizes=[3, [3, 2, 4]],
    101  dim_sizes=[3, [3, 2, 4], 2, 3],
    105  def testBroadcastToRank(self, dim_sizes, rank, expected_dim_sizes):  argument
    106  shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes)
    [all …]

D | ragged_tensor_shape.py
    152  def from_dim_sizes(dim_sizes):  argument
    166  [dim_sizes]):
    167  dim_sizes = tuple(
    169  name='dim_sizes') for size in dim_sizes)
    172  for dim, dim_size in enumerate(dim_sizes):
    177  return RaggedTensorDynamicShape(dim_sizes[:inner_split],
    178  dim_sizes[inner_split:])

/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

D | const_op.cc
     55  shape.dim_sizes()));  in Compile()
     63  shape.dim_sizes()));  in Compile()
     71  shape.dim_sizes()));  in Compile()
     82  shape.dim_sizes()));  in Compile()
     93  shape.dim_sizes()));  in Compile()
    101  shape.dim_sizes()));  in Compile()
    109  shape.dim_sizes()));  in Compile()

D | reshape_op.cc
    115  ctx->SetOutput(0, xla::Reshape(ctx->Input(0), shape.dim_sizes()));  in Compile()
    131  shape.dim_sizes(), dims_are_dynamic));  in Compile()
    135  xla::CommonFactors(input_shape.dim_sizes(), shape.dim_sizes());  in Compile()
    173  shape.dim_sizes(), dims_are_dynamic));  in Compile()
    175  << " to " << xla::VectorString(shape.dim_sizes())  in Compile()
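
The reshape kernel above feeds the target TensorShape's dim_sizes() straight into xla::Reshape. A minimal sketch of that pattern, with a hypothetical helper name and an operand assumed to live on an already-built XlaBuilder (nothing here is taken verbatim from reshape_op.cc):

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    // Hypothetical helper: reshape `input` to the extents carried by `shape`.
    // dim_sizes() converts to the span of new dimension sizes xla::Reshape takes.
    xla::XlaOp ReshapeToTensorShape(xla::XlaOp input,
                                    const tensorflow::TensorShape& shape) {
      return xla::Reshape(input, shape.dim_sizes());
    }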

D | beta_op.cc
     67  auto a, BroadcastTo(ctx->Input(0), merged_shape.dim_sizes()));  in Compile()
     69  auto b, BroadcastTo(ctx->Input(1), merged_shape.dim_sizes()));  in Compile()
     71  auto x, BroadcastTo(ctx->Input(2), merged_shape.dim_sizes()));  in Compile()

D | split_op.cc
    184  auto dim_sizes = input_shape.dim_sizes();  in Compile()  local
    185  std::vector<int64> limits(dim_sizes.begin(), dim_sizes.end());  in Compile()

D | clip_by_value_op.cc
     48  min = xla::Broadcast(min, shape.dim_sizes());  in Compile()
     52  max = xla::Broadcast(max, shape.dim_sizes());  in Compile()

D | relu_op.cc
     83  xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());  in Compile()
     99  xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());  in Compile()
    101  XlaHelpers::IntegerLiteral(b, input_type(0), 6), shape.dim_sizes());  in Compile()
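
relu_op.cc (and clip_by_value_op.cc above it) broadcasts a scalar constant out to shape.dim_sizes(). A sketch of that zero-filling idiom, using a hypothetical helper name and a plain xla::ConstantR0 instead of the XlaHelpers wrappers used in the kernels:

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    // Hypothetical helper: build a zero tensor with the extents of `shape`.
    // Broadcasting a scalar constant by shape.dim_sizes() yields a tensor of
    // exactly that shape.
    xla::XlaOp ZerosOfShape(xla::XlaBuilder* builder,
                            const tensorflow::TensorShape& shape) {
      xla::XlaOp zero = xla::ConstantR0<float>(builder, 0.0f);
      return xla::Broadcast(zero, shape.dim_sizes());
    }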

D | tensor_array_ops.cc
    166  value = xla::Broadcast(zero, ta_shape.dim_sizes());  in Compile()
    221  auto update = xla::Reshape(value, slice_shape.dim_sizes());  in Compile()
    225  written = DynamicAddSlice(b, ta, update, slice_shape.dim_sizes(),  in Compile()
    270  auto slice_shape = ta_shape.dim_sizes();  in Compile()
    405  auto slice_dims = value_shape.dim_sizes();  in Compile()
    409  auto value_ends = value_shape.dim_sizes();  in Compile()
    461  auto ta_dims = ta_shape.dim_sizes();  in Compile()
    533  const xla::XlaOp reshape = xla::Reshape(value, ta_shape.dim_sizes());  in Compile()

D | cwise_ops.cc
     80  Computation(ctx, lhs_handle, lhs_shape.dim_sizes(), rhs_handle,  in Compile()
     81  rhs_shape.dim_sizes(), bcast, extend_dimension);  in Compile()

D | lower_upper_bound_ops.cc
     51  auto values_reshaped = xla::Reshape(values, new_values_shape.dim_sizes());  in BuildLowerUpperBoundOp()
     58  xla::Reshape(sorted_inputs, new_sorted_inputs_shape.dim_sizes());  in BuildLowerUpperBoundOp()

D | resampler_ops.cc
     59  auto warp_dims = warp_shape.dim_sizes();  in BilinearWeights()
    256  auto warp_dims = warp_shape.dim_sizes();  in CalculateGradData()
    363  auto warp_dims = warp_shape.dim_sizes();  in CalculateGradWarp()
    499  for (int size : warp_shape.dim_sizes()) {  in Compile()
    579  auto warp_dims = warp_shape.dim_sizes();  in Compile()
    620  for (int size : warp_shape.dim_sizes()) {  in Compile()
    665  auto warp_dims = warp_shape.dim_sizes();  in Compile()

D | random_ops.cc
    343  BroadcastTo(ctx->Input(1), shape.dim_sizes()));  in Compile()
    345  BroadcastTo(ctx->Input(2), shape.dim_sizes()));  in Compile()
    347  BroadcastTo(ctx->Input(3), shape.dim_sizes()));  in Compile()
    349  BroadcastTo(ctx->Input(4), shape.dim_sizes()));  in Compile()

/external/tensorflow/tensorflow/core/util/

D | tensor_format.h
    470  return GetTensorDim(gtl::ArraySlice<int64>(tensor_shape.dim_sizes()),  in GetTensorDim()
    479  return GetFilterDim(gtl::ArraySlice<int64>(tensor_shape.dim_sizes()),  in GetFilterDim()
    525  gtl::InlinedVector<int64, 6> dim_sizes(dims);  in ShapeFromFormat()
    526  dim_sizes[GetTensorBatchDimIndex(dims, format)] = N;  in ShapeFromFormat()
    534  dim_sizes[GetTensorInnerWidthDimIndex(dims, format)] = 4;  in ShapeFromFormat()
    537  dim_sizes[GetTensorSpatialDimIndex(dims, format, dim)] = dim_size;  in ShapeFromFormat()
    545  dim_sizes[GetTensorInnerFeatureDimIndex(dims, format)] = 4;  in ShapeFromFormat()
    547  dim_sizes[feature_index] = C;  in ShapeFromFormat()
    548  return TensorShape(dim_sizes);  in ShapeFromFormat()
    559  gtl::InlinedVector<int64, 6> dim_sizes(dims);  in ShapeFromFilterTensorFormat()
    [all …]
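
ShapeFromFormat fills a gtl::InlinedVector<int64, 6> dim_sizes slot by slot via the GetTensor*DimIndex helpers and wraps it in a TensorShape; GetTensorDim reads one dimension back out by its layout letter. A small sketch of round-tripping N and C through those helpers (the concrete sizes are illustrative, not taken from the file):

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/util/tensor_format.h"

    void FormatRoundTrip() {
      // Build an NCHW shape from batch, spatial dims, and channels; internally
      // ShapeFromFormat writes dim_sizes[] at the indices the format dictates.
      tensorflow::TensorShape shape = tensorflow::ShapeFromFormat(
          tensorflow::FORMAT_NCHW, /*N=*/8, /*spatial=*/{224, 224}, /*C=*/3);

      // Read dimensions back without hard-coding their positions in the layout.
      tensorflow::int64 n = tensorflow::GetTensorDim(shape, tensorflow::FORMAT_NCHW, 'N');
      tensorflow::int64 c = tensorflow::GetTensorDim(shape, tensorflow::FORMAT_NCHW, 'C');
      (void)n;
      (void)c;
    }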

/external/tensorflow/tensorflow/c/eager/

D | unified_api_test.cc
     93  int64 dim_sizes[] = {2, 4};  in TestTensorShape2x4()  local
     95  if (shape.dim_size(i) != dim_sizes[i]) {  in TestTensorShape2x4()
     97  dim_sizes[i],  in TestTensorShape2x4()
    122  int64_t dim_sizes[] = {2, 4};  in TEST_P()  local
    124  TestTensorHandleWithDimsFloat(ctx.get(), data, dim_sizes, 2, &x_raw);  in TEST_P()
    173  int64 dim_sizes[] = {2, -1};  in TEST_P()  local
    174  Status s = PartialTensorShape::MakePartialShape(dim_sizes, 2, &shape);  in TEST_P()
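
The last test above builds a partial shape with an unknown dimension through PartialTensorShape::MakePartialShape. A minimal sketch of the same call, with a hypothetical helper name and illustrative sizes; -1 marks the unknown dimension, and invalid input is reported through the returned Status rather than a crash:

    #include "tensorflow/core/framework/tensor_shape.h"

    // Hypothetical helper: build a 2-D partial shape whose leading (batch)
    // dimension is unknown. MakePartialShape validates the array and fills *out.
    tensorflow::Status MakeBatchUnknownShape(tensorflow::PartialTensorShape* out) {
      int64_t dim_sizes[] = {-1, 4};
      return tensorflow::PartialTensorShape::MakePartialShape(dim_sizes, 2, out);
    }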

/external/tensorflow/tensorflow/core/framework/

D | tensor_shape.cc
     65  for (auto d : dim_sizes()) {  in IsValid()
    184  TensorShapeBase<Shape>::TensorShapeBase(gtl::ArraySlice<int64> dim_sizes) {  in TensorShapeBase()  argument
    187  TF_CHECK_OK(InitDims(dim_sizes));  in TensorShapeBase()
    192  gtl::ArraySlice<int64> dim_sizes, TensorShapeBase* out) {  in BuildTensorShapeBase()  argument
    195  return out->InitDims(dim_sizes);  in BuildTensorShapeBase()
    213  Status TensorShapeBase<Shape>::InitDims(gtl::ArraySlice<int64> dim_sizes) {  in InitDims()  argument
    222  for (auto s : dim_sizes) {  in InitDims()
    230  for (auto s : dim_sizes) {  in InitDims()
    241  switch (dim_sizes.size()) {  in InitDims()
    244  const int64 size = dim_sizes[0];  in InitDims()
    [all …]
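
tensor_shape.cc shows the two construction paths: the TensorShapeBase constructor wraps InitDims in TF_CHECK_OK and aborts on bad sizes, while BuildTensorShapeBase hands the InitDims Status back to the caller. A sketch of the difference, assuming the Status-returning builder is reachable through the TensorShape subclass (sizes are illustrative):

    #include "tensorflow/core/framework/tensor_shape.h"

    void ConstructionPaths() {
      // CHECK-ing path: the constructor aborts the process if the sizes are
      // invalid (e.g. negative or overflowing).
      tensorflow::TensorShape checked({2, 3, 4});

      // Status-returning path: the same InitDims validation, but the caller
      // decides how to handle a bad shape.
      tensorflow::TensorShape built;
      tensorflow::Status s =
          tensorflow::TensorShape::BuildTensorShapeBase({2, 3, 4}, &built);
      if (!s.ok()) {
        // react to the invalid shape instead of crashing
      }
      (void)checked;
    }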

D | tensor_shape.h
    170  explicit TensorShapeBase(gtl::ArraySlice<int64> dim_sizes);
    171  TensorShapeBase(std::initializer_list<int64> dim_sizes)  in TensorShapeBase()  argument
    172  : TensorShapeBase(gtl::ArraySlice<int64>(dim_sizes)) {}  in TensorShapeBase()
    184  static Status BuildTensorShapeBase(gtl::ArraySlice<int64> dim_sizes,
    186  static Status BuildTensorShapeBase(std::initializer_list<int64> dim_sizes,  in BuildTensorShapeBase()  argument
    188  return BuildTensorShapeBase(gtl::ArraySlice<int64>(dim_sizes), out);  in BuildTensorShapeBase()
    305  gtl::InlinedVector<int64, 4> dim_sizes() const;
    327  Status InitDims(gtl::ArraySlice<int64> dim_sizes);
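
The dim_sizes() accessor declared above copies the extents into a gtl::InlinedVector<int64, 4>, which converts to the gtl::ArraySlice<int64> the constructor accepts. A small round-trip sketch with illustrative sizes:

    #include "tensorflow/core/framework/tensor_shape.h"

    void DimSizesRoundTrip() {
      tensorflow::TensorShape shape({2, 3, 4});

      // dim_sizes() copies the extents into a small inline vector...
      tensorflow::gtl::InlinedVector<tensorflow::int64, 4> sizes = shape.dim_sizes();

      // ...which converts to gtl::ArraySlice<int64>, the type taken by the
      // explicit constructor (and by the XLA helpers in the kernels above).
      tensorflow::TensorShape copy(sizes);
      (void)copy;
    }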

D | tensor.h
    778  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));  in reinterpret_last_dimension()
    781  dims[d] = shape_.dim_sizes()[d];  in reinterpret_last_dimension()
    794  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));  in reinterpret_last_dimension()
    797  dims[d] = shape_.dim_sizes()[d];  in reinterpret_last_dimension()
    914  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));  in flat_inner_dims()
    919  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));  in flat_outer_dims()
    925  ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);  in flat_inner_outer_dims()
    931  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));  in flat_inner_dims()
    936  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));  in flat_outer_dims()
    943  ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);  in flat_inner_outer_dims()

/external/tensorflow/tensorflow/python/lib/core/

D | py_seq_tensor.cc
    324  TFE_Context* ctx, absl::Span<const int64> dim_sizes) {  in CreateTensor()
    325  return tensorflow::unwrap(ctx)->CreateTensor(DT_INT64, dim_sizes);  in CreateTensor()
    364  TFE_Context* ctx, absl::Span<const int64> dim_sizes) {  in CreateTensor()
    365  return tensorflow::unwrap(ctx)->CreateTensor(DT_UINT64, dim_sizes);  in CreateTensor()
    401  TFE_Context* ctx, absl::Span<const int64> dim_sizes) {  in CreateTensor()
    402  return tensorflow::unwrap(ctx)->CreateTensor(DT_INT32, dim_sizes);  in CreateTensor()
    508  TFE_Context* ctx, absl::Span<const int64> dim_sizes) {  in CreateTensor()
    509  return tensorflow::unwrap(ctx)->CreateTensor(DT_FLOAT, dim_sizes);  in CreateTensor()
    524  TFE_Context* ctx, absl::Span<const int64> dim_sizes) {  in CreateTensor()
    525  return tensorflow::unwrap(ctx)->CreateTensor(DT_DOUBLE, dim_sizes);  in CreateTensor()
    [all …]

/external/tensorflow/tensorflow/compiler/xla/service/cpu/

D | shape_partition_test.cc
    169  std::vector<int64> dim_sizes(num_outer_dims_to_partition);  in TEST_F()  local
    175  dim_sizes[i] = shape.dimensions(dimension);  in TEST_F()
    176  total_dim_size *= dim_sizes[i];  in TEST_F()
    178  const int64 dim_partition_count = 1 + Rand() % dim_sizes[i];  in TEST_F()
    202  EXPECT_EQ(expected_index, dim_sizes[i]);  in TEST_F()

/external/tensorflow/tensorflow/c/experimental/saved_model/core/ops/

D | variable_ops.cc
     49  gtl::InlinedVector<int64, 4> dim_sizes = shape.dim_sizes();  in CreateUninitializedResourceVariable()  local
     51  "shape", reinterpret_cast<const int64_t*>(dim_sizes.data()),  in CreateUninitializedResourceVariable()

/external/tensorflow/tensorflow/compiler/tf2xla/

D | xla_resource.cc
    133  xla::Broadcast(XlaHelpers::Zero(builder, type_), shape_.dim_sizes());  in SetZeroValue()
    141  ta_shape.dim_sizes());  in SetZeroValue()
    150  ta_shape.dim_sizes()),  in SetZeroValue()
    174  xla::Broadcast(XlaHelpers::Zero(builder, type_), ta_shape.dim_sizes());  in GetOrCreateTensorArrayGradient()

/external/tensorflow/tensorflow/core/grappler/optimizers/

D | mkl_remapper_test.cc
    105  TensorShape(input_shape.shape_.dim_sizes()));  in FuseConv2DWithBiasAndAddNOrAdd()
    107  TensorShape(input_shape_addn.shape_.dim_sizes()));  in FuseConv2DWithBiasAndAddNOrAdd()
    109  TensorShape(filter_shape.shape_.dim_sizes()));  in FuseConv2DWithBiasAndAddNOrAdd()
    111  TensorShape(bias_shape.shape_.dim_sizes()));  in FuseConv2DWithBiasAndAddNOrAdd()
    475  TensorShape(input_shape.shape_.dim_sizes()));  in TEST_F()
    477  TensorShape(input_shape_add.shape_.dim_sizes()));  in TEST_F()
    479  TensorShape(filter_shape.shape_.dim_sizes()));  in TEST_F()
    481  TensorShape(bias_shape.shape_.dim_sizes()));  in TEST_F()

/external/tensorflow/tensorflow/core/kernels/

D | cwise_op_select.cc
    152  BCastList<3> bcast({cond->shape().dim_sizes(), then->shape().dim_sizes(),  in Compute()
    153  else_->shape().dim_sizes()},  in Compute()
    163  BCast cond_bcast(bcast.output_shape(), cond->shape().dim_sizes(), false);  in Compute()
    164  BCast then_bcast(bcast.output_shape(), then->shape().dim_sizes(), false);  in Compute()
    165  BCast else_bcast(bcast.output_shape(), else_->shape().dim_sizes(), false);  in Compute()
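
cwise_op_select.cc feeds each operand's dim_sizes() into BCast/BCastList to compute the broadcasted output shape. A sketch of the two-input case, with a hypothetical helper name and fewer_dims_optimization disabled as in the listing above:

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/util/bcast.h"

    // Hypothetical helper: compute the NumPy-style broadcasted shape of two
    // tensors. Returns false if the shapes are not broadcast-compatible.
    bool BroadcastedShape(const tensorflow::TensorShape& x,
                          const tensorflow::TensorShape& y,
                          tensorflow::TensorShape* out) {
      tensorflow::BCast bcast(x.dim_sizes(), y.dim_sizes(),
                              /*fewer_dims_optimization=*/false);
      if (!bcast.IsValid()) return false;
      *out = tensorflow::TensorShape(bcast.output_shape());
      return true;
    }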

/external/tensorflow/tensorflow/core/kernels/data/experimental/

D | rebatch_dataset_op.cc
    460  auto dim_sizes = dataset()->output_shapes()[i].dim_sizes();  in GetNextInternal()  local
    464  dim_sizes[0] = 0;  in GetNextInternal()
    468  for (int j = 1; j < dim_sizes.size(); ++j) {  in GetNextInternal()
    469  if (dim_sizes[j] == -1) dim_sizes[j] = 0;  in GetNextInternal()
    472  TensorShape tensor_shape(dim_sizes);  in GetNextInternal()
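
The rebatch iterator converts possibly-partial output shapes into concrete TensorShapes by mapping unknown (-1) dim_sizes entries to 0. A sketch of that conversion as a hypothetical standalone helper (it assumes the partial shape has a known rank):

    #include "tensorflow/core/framework/tensor_shape.h"

    // Hypothetical helper mirroring the pattern above: a PartialTensorShape may
    // contain -1 for unknown dimensions, which a concrete TensorShape cannot
    // hold, so unknown sizes are mapped to 0 to build an empty-but-valid shape.
    tensorflow::TensorShape ZeroElementShapeLike(
        const tensorflow::PartialTensorShape& partial) {
      auto dim_sizes = partial.dim_sizes();
      for (auto& d : dim_sizes) {
        if (d == -1) d = 0;
      }
      return tensorflow::TensorShape(dim_sizes);
    }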