/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | infeed_thunk.cc |
    49   const Shape& source_shape =                                               in ExecuteOnStream() local
    51   TF_RET_CHECK(ShapeUtil::Equal(dest_slices_[index].shape, source_shape))   in ExecuteOnStream()
    53   << ShapeUtil::HumanStringWithLayout(source_shape)                         in ExecuteOnStream()
|
D | ir_emitter_unnested.cc |
    2232  const Shape source_shape =                                               in EmitSelectAndScatterFromMlir() local
    2239  source_shape, ir_emitter_context_->gpu_device_info());                   in EmitSelectAndScatterFromMlir()
    2426  return ParallelLoopEmitter(loop_body_emitter, source_shape, launch_dimensions,   in EmitSelectAndScatterFromMlir()
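EmitSelectAndScatterFromMlir derives launch dimensions from the source shape and the GPU device info before emitting a parallel loop over the source elements. A rough standalone sketch of how a one-thread-per-element launch could be sized; the helper and the numbers are assumptions for illustration, not the XLA code:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct LaunchDims {
      int64_t block_count;
      int64_t threads_per_block;
    };

    // One thread per source element, capped by the device's threads-per-block limit.
    LaunchDims ComputeLaunchDims(const std::vector<int64_t>& source_dims,
                                 int64_t max_threads_per_block /* e.g. 1024 */) {
      int64_t num_elements = 1;
      for (int64_t d : source_dims) num_elements *= d;
      if (num_elements == 0) return {1, 1};  // degenerate shape: launch a trivial grid
      int64_t threads = std::min<int64_t>(num_elements, max_threads_per_block);
      // Ceiling division so every element is covered by exactly one thread.
      int64_t blocks = (num_elements + threads - 1) / threads;
      return {blocks, threads};
    }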
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | select_and_scatter_test.cc |
    43   std::vector<int64> source_shape;               member
    78   auto source_shape = GetParam().source_shape;   in XLA_TEST_P() local
    79   Array<float> s(source_shape);                  in XLA_TEST_P()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | indexed_array_analysis.cc |
    476  const Shape& source_shape = operand->source()->shape();                 in ReshapeToRemoveDegenerateDims() local
    478  for (int64 i = 0, e = source_shape.dimensions_size(); i < e; i++) {     in ReshapeToRemoveDegenerateDims()
    479  if (i == operand->source_dim() || source_shape.dimensions(i) != 1) {    in ReshapeToRemoveDegenerateDims()
    480  new_source_shape_dims.push_back(source_shape.dimensions(i));            in ReshapeToRemoveDegenerateDims()
    511  std::count(source_shape.dimensions().begin(),                           in ReshapeToRemoveDegenerateDims()
    512  source_shape.dimensions().begin() + operand->source_dim(), 1);          in ReshapeToRemoveDegenerateDims()
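The ReshapeToRemoveDegenerateDims lines above drop size-1 dimensions from the source shape while protecting the source dimension, then count how many dropped dimensions preceded it to find its new index. A minimal standalone sketch of that pattern, with illustrative names rather than the XLA types:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct SqueezeResult {
      std::vector<int64_t> dims;   // dimensions with degenerate (== 1) entries removed
      int64_t new_protected_dim;   // where the protected dimension landed
    };

    SqueezeResult RemoveDegenerateDims(const std::vector<int64_t>& dims,
                                       int64_t protected_dim) {
      SqueezeResult result;
      for (int64_t i = 0, e = static_cast<int64_t>(dims.size()); i < e; ++i) {
        // Keep the protected dimension even if it is 1, plus every non-degenerate dim.
        if (i == protected_dim || dims[i] != 1) {
          result.dims.push_back(dims[i]);
        }
      }
      // The protected dimension shifts left by the number of 1s dropped before it.
      int64_t dropped_before =
          std::count(dims.begin(), dims.begin() + protected_dim, 1);
      result.new_protected_dim = protected_dim - dropped_before;
      return result;
    }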
|
D | hlo_sharding_util.cc |
    297  absl::optional<HloSharding> ReshapeSharding(const Shape& source_shape,         in ReshapeSharding() argument
    314  const Shape tile_shape = sharding.TileShape(source_shape);                     in ReshapeSharding()
    316  std::vector<int64> source_dims_stack(source_shape.rank());                     in ReshapeSharding()
    318  std::vector<int64> sharding_tile_dims_stack(source_shape.rank());              in ReshapeSharding()
    319  for (int64 i = 0; i < source_shape.rank(); ++i) {                              in ReshapeSharding()
    320  source_dims_stack[i] = source_shape.dimensions(source_shape.rank() - 1 - i);   in ReshapeSharding()
    322  sharding.tile_assignment().dim(source_shape.rank() - 1 - i);                   in ReshapeSharding()
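ReshapeSharding here pushes the source dimensions (and their tile counts) onto stacks in reverse order, so dimension 0 sits on top and can be popped first while walking the target shape. A simplified standalone sketch of that reversed-stack walk; it only merges source dimensions, never splits them, and uses none of the XLA types:

    #include <cstdint>
    #include <vector>

    // Returns true if every target dimension can be formed by merging a run of
    // the remaining source dimensions (a simplified reshape compatibility walk).
    bool MatchReshapeDims(const std::vector<int64_t>& source_dims,
                          const std::vector<int64_t>& target_dims) {
      const int64_t n = static_cast<int64_t>(source_dims.size());
      std::vector<int64_t> source_dims_stack(n);
      for (int64_t i = 0; i < n; ++i) {
        // Reverse order: source_dims[0] ends up at the back, i.e. on top of the stack.
        source_dims_stack[i] = source_dims[n - 1 - i];
      }
      for (int64_t target_dim : target_dims) {
        int64_t merged = 1;
        while (merged < target_dim && !source_dims_stack.empty()) {
          merged *= source_dims_stack.back();
          source_dims_stack.pop_back();
        }
        if (merged != target_dim) return false;  // would require splitting a source dim
      }
      return source_dims_stack.empty();  // all source dimensions consumed
    }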
|
D | hlo_sharding_util.h |
    89   absl::optional<HloSharding> ReshapeSharding(const Shape& source_shape,
|
D | shape_inference.h |
    185  const Window& window, const Shape& source_shape,
|
D | shape_inference.cc |
    2245  const Window& window, const Shape& source_shape,              in InferSelectAndScatterShape() argument
    2283  {source_shape.element_type()},                                 in InferSelectAndScatterShape()
    2291  if (!ShapeUtil::CompatibleIgnoringFpPrecision(source_shape,    in InferSelectAndScatterShape()
    2296  ShapeUtil::HumanString(source_shape),                          in InferSelectAndScatterShape()
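InferSelectAndScatterShape feeds the source element type into the scatter computation check and requires the source shape to match the window-reduced operand shape, compared while ignoring floating-point precision. A standalone sketch of what that comparison amounts to, assuming a shape is only an element type plus dimensions (not the ShapeUtil implementation):

    #include <cstdint>
    #include <vector>

    enum class ElemType { F16, BF16, F32, F64, S32, PRED };

    struct SimpleShape {
      ElemType element_type;
      std::vector<int64_t> dimensions;
    };

    bool IsFloat(ElemType t) {
      return t == ElemType::F16 || t == ElemType::BF16 ||
             t == ElemType::F32 || t == ElemType::F64;
    }

    // Element types must match, except that any two float widths are accepted;
    // dimensions must match exactly.
    bool CompatibleIgnoringFpPrecision(const SimpleShape& a, const SimpleShape& b) {
      if (a.element_type != b.element_type &&
          !(IsFloat(a.element_type) && IsFloat(b.element_type))) {
        return false;
      }
      return a.dimensions == b.dimensions;
    }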
|
D | dynamic_dimension_inference_test.cc |
    990   auto source_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});   in TEST_F() local
    1018  /*parameter_number=*/2, source_shape, "B"));                in TEST_F()
|
/external/tensorflow/tensorflow/python/eager/ |
D | backprop.py |
    1294  source_shape = array_ops.shape(source)
    1300  [check_ops.assert_equal(batch_size, source_shape[0])]):
    1329  new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | literal.cc |
    119  const Shape& source_shape, const Shape& dest_shape,              in StrideConfig() argument
    127  if (dimensions[LayoutUtil::Minor(source_shape.layout(), 0)] >=   in StrideConfig()
    129  minor_dimension = LayoutUtil::Minor(source_shape.layout(), 0);   in StrideConfig()
    134  IndexUtil::GetDimensionStride(source_shape, minor_dimension);    in StrideConfig()
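StrideConfig picks whichever of the source or destination layout's minor-most dimensions has the larger extent as the dimension to copy contiguously, then asks IndexUtil for that dimension's stride. A standalone sketch of the stride computation for a minor-to-major layout, illustrative only:

    #include <cstdint>
    #include <vector>

    // The stride of a dimension is the product of the sizes of all dimensions
    // that are more minor than it in the layout.
    int64_t DimensionStride(const std::vector<int64_t>& dimensions,
                            const std::vector<int64_t>& minor_to_major,
                            int64_t dim) {
      int64_t stride = 1;
      for (int64_t d : minor_to_major) {
        if (d == dim) break;  // everything seen so far is more minor than `dim`
        stride *= dimensions[d];
      }
      return stride;
    }

For example, with dimensions {2, 3, 4} and minor_to_major {2, 1, 0} (row-major), the stride of dimension 0 comes out to 12.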
|
D | literal.h |
    736  StrideConfig(const Shape& source_shape, const Shape& dest_shape,
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | xla_builder.cc |
    3080  TF_ASSIGN_OR_RETURN(const Shape* source_shape, GetShapePtr(source));   in SelectAndScatterInternal()
    3093  *source_shape, *init_shape, scatter_shape));                           in SelectAndScatterInternal()
|