/external/tensorflow/tensorflow/compiler/xla/
shape_tree_test.cc
    54   ShapeTree<int> int_tree;  in TEST_F()
    57   ShapeTree<bool> bool_tree;  in TEST_F()
    63   ShapeTree<int> int_tree(shape);  in TestShapeConstructor()
    71   ShapeTree<bool> bool_tree(shape);  in TestShapeConstructor()
    89   ShapeTree<int> tree(shape, 42);  in TestInitValueConstructor()
    121  ShapeTree<int> shape_tree{ShapeUtil::MakeTupleShape({})};  in TEST_F()
    126  ShapeTree<int> shape_tree{array_shape_};  in TEST_F()
    135  ShapeTree<int> copy{shape_tree};  in TEST_F()
    149  ShapeTree<int> shape_tree{tuple_shape_};  in TEST_F()
    168  ShapeTree<int> copy{shape_tree};  in TEST_F()
    [all …]

shape_tree.h
    94   class ShapeTree {
    100  ShapeTree() : ShapeTree(ShapeUtil::MakeNil()) {}  in ShapeTree() function
    108  explicit ShapeTree(Shape shape);
    109  explicit ShapeTree(const Shape* shape);
    110  explicit ShapeTree(const std::shared_ptr<Shape>& shape);
    113  ShapeTree(Shape shape, const T& init_value);
    114  ShapeTree(const Shape* shape, const T& init_value);
    115  ShapeTree(const std::shared_ptr<Shape>& shape, const T& init_value);
    141  ShapeTree(const ShapeTree&) = default;
    142  ShapeTree& operator=(const ShapeTree&) = default;
    [all …]

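The shape_tree.h hits above show the two ways a ShapeTree<T> is normally built:
from a Shape alone (elements default-constructed) or from a Shape plus an init
value, which is what shape_tree_test.cc exercises. A minimal construction
sketch, assuming ShapeUtil::MakeShape and the element()/mutable_element()
accessors from the truncated remainder of shape_tree.h (they are not among the
hits listed here):

// Sketch only; not taken verbatim from the files above.
#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/shape_util.h"

void ShapeTreeConstructionSketch() {
  using xla::Shape;
  using xla::ShapeTree;
  using xla::ShapeUtil;

  // An array shape yields a tree with a single node at the empty index {}.
  Shape array_shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
  ShapeTree<int> array_tree(array_shape, /*init_value=*/42);

  // A tuple shape yields one node per subshape plus the root node.
  Shape tuple_shape = ShapeUtil::MakeTupleShape({array_shape, array_shape});
  ShapeTree<bool> tuple_tree(tuple_shape, /*init_value=*/false);
  *tuple_tree.mutable_element({0}) = true;  // flag the first tuple element
}
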
/external/tensorflow/tensorflow/compiler/xla/service/ |
hlo_sharding_test.cc
    72   ShapeTree<HloSharding> shape_tree =  in TEST_F()
    142  ShapeTree<HloSharding> shape_tree =  in TEST_F()
    191  ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),  in TEST_F()
    199  ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),  in TEST_F()
    207  ShapeTree<HloSharding> shape_tree1(  in TEST_F()
    211  ShapeTree<HloSharding> shape_tree2(  in TEST_F()
    221  ShapeTree<HloSharding> shape_tree1(  in TEST_F()
    225  ShapeTree<HloSharding> shape_tree2(  in TEST_F()
    302  ShapeTree<HloSharding> sharding_tree(tuple_shape, HloSharding::Replicate());  in TEST_F()

shaped_buffer.h
    90   void set_buffers(ShapeTree<se::DeviceMemoryBase> buffers) {  in set_buffers()
    97   const ShapeTree<se::DeviceMemoryBase>& buffers() const { return buffers_; }  in buffers()
    98   ShapeTree<se::DeviceMemoryBase>& buffers() { return buffers_; }  in buffers()
    119  ShapeTree<se::DeviceMemoryBase> buffers_;

copy_insertion.cc
    130  const ShapeTree<bool>& indices_to_copy) {  in DeepCopyAndAddControlEdges()
    135  ShapeTree<HloInstruction*> from_copy_tree(from->shape(),  in DeepCopyAndAddControlEdges()
    141  ShapeTree<HloInstruction*> to_copy_tree(to->shape(), /*init_value=*/nullptr);  in DeepCopyAndAddControlEdges()
    170  ShapeTree<bool>* indices_to_copy) {  in IndicesToCopyForWhile()
    261  ShapeTree<bool> indices_to_copy(xla_while->shape());  in AddCopiesForWhile()
    344  ShapeTree<bool> output_indices_to_copy(root->shape());  in AddCopiesForAliasedInputOutputs()
    345  std::vector<absl::optional<ShapeTree<HloInstruction*>>> copied_parameters(  in AddCopiesForAliasedInputOutputs()
    350  ShapeTree<bool> param_indices_to_copy(param->shape());  in AddCopiesForAliasedInputOutputs()
    374  ShapeTree<HloInstruction*> param_copy_tree(param->shape(),  in AddCopiesForAliasedInputOutputs()
    391  ShapeTree<HloInstruction*> output_copy_tree(root->shape(),  in AddCopiesForAliasedInputOutputs()
    [all …]

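copy_insertion.cc builds a ShapeTree<bool> over an instruction's shape to mark
which ShapeIndex positions need a copy, plus a parallel ShapeTree<HloInstruction*>
to remember the copies that were inserted; hlo_liveness_analysis.cc and
hlo_computation_test.cc below use the same ShapeTree<bool>-as-index-mask idiom.
A sketch of that idiom, assuming ShapeTree::ForEachElement (a visitor over
(ShapeIndex, value) pairs) from shape_tree.h, which is not among the hits shown:

// Sketch only: collect every ShapeIndex whose flag is set, loosely analogous
// to ForEachLiveIndex() in hlo_liveness_analysis.cc.
#include <vector>
#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/shape_util.h"

std::vector<xla::ShapeIndex> MarkedIndices(const xla::Shape& shape) {
  // All flags start out false; flip the ones that need copies.
  xla::ShapeTree<bool> indices_to_copy(shape, /*init_value=*/false);
  *indices_to_copy.mutable_element({}) = true;  // mark the root index

  std::vector<xla::ShapeIndex> marked;
  indices_to_copy.ForEachElement(
      [&](const xla::ShapeIndex& index, bool flagged) {
        if (flagged) marked.push_back(index);
      });
  return marked;
}
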
hlo_sharding_metadata.cc
    172  StatusOr<ShapeTree<HloSharding>> GetShardingTreeFromUser(  in GetShardingTreeFromUser()
    206  ShapeTree<HloSharding>* lhs_tree, ShapeTree<HloSharding>::iterator lhs_it,  in AssignTreeSharding()
    207  const ShapeTree<HloSharding>& rhs_tree) {  in AssignTreeSharding()
    252  ShapeTree<HloSharding> sharding_tree(  in ApplyShardingFromUsers()
    267  TF_ASSIGN_OR_RETURN(ShapeTree<HloSharding> user_sharding_tree,  in ApplyShardingFromUsers()
    275  ShapeTree<HloSharding>::iterator sharding_tree_begin =  in ApplyShardingFromUsers()

hlo_liveness_analysis.cc
    53   void ForEachLiveIndex(const ShapeTree<bool>& index_tree,  in ForEachLiveIndex()
    125  const ShapeTree<bool>& index_tree = FindOrDie(*live_index_map, instruction);  in PropagateLivenessThroughTuple()
    155  const ShapeTree<bool>& index_tree = FindOrDie(*live_index_map, instruction);  in PropagateLivenessThroughGTE()
    174  const ShapeTree<bool>& index_tree = FindOrDie(*live_index_map, instruction);  in PropagateLivenessThroughWhile()
    205  const ShapeTree<bool>& index_tree =  in PropagateLivenessToParameterCallers()

hlo_sharding.h
    63   static HloSharding Tuple(const ShapeTree<HloSharding>& sub_shardings);
    167  StatusOr<ShapeTree<HloSharding>> AsShapeTree(const Shape& shape) const;
    168  ShapeTree<HloSharding> GetAsShapeTree(const Shape& shape) const {  in GetAsShapeTree()

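hlo_sharding.h declares the round trip between the two representations: a tuple
HloSharding can be built from a ShapeTree<HloSharding> (Tuple) and expanded back
into one (AsShapeTree / GetAsShapeTree), with hlo_sharding.cc:219 seeding the
tree with HloSharding::Replicate(). A sketch of building a per-leaf tuple
sharding this way; HloSharding::AssignDevice() is assumed from the rest of
hlo_sharding.h and does not appear in the hits above:

// Sketch only; mirrors the Tuple()/Replicate() calls listed above.
#include "tensorflow/compiler/xla/service/hlo_sharding.h"
#include "tensorflow/compiler/xla/shape_tree.h"

xla::HloSharding TupleShardingSketch(const xla::Shape& tuple_shape) {
  // Start with every leaf replicated, as HloSharding::AsShapeTree() does.
  xla::ShapeTree<xla::HloSharding> tree(tuple_shape,
                                        xla::HloSharding::Replicate());
  // Pin the first tuple element to device 0 (AssignDevice is an assumption).
  *tree.mutable_element({0}) = xla::HloSharding::AssignDevice(0);
  return xla::HloSharding::Tuple(tree);
}
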
hlo_sharding.cc
    42   HloSharding HloSharding::Tuple(const ShapeTree<HloSharding>& sub_shardings) {  in Tuple()
    216  StatusOr<ShapeTree<HloSharding>> HloSharding::AsShapeTree(  in AsShapeTree()
    219  ShapeTree<HloSharding> result(shape, HloSharding::Replicate());  in AsShapeTree()
    232  return ShapeTree<HloSharding>(shape, *this);  in AsShapeTree()
    241  return Tuple(ShapeTree<HloSharding>(shape, *this));  in GetTupleSharding()
    280  ShapeTree<HloSharding> shape_tree = GetAsShapeTree(shape);  in ValidateTuple()

hlo_computation_test.cc
    305  ShapeTree<bool> indices_to_copy(constant->shape(), /*init_value=*/true);  in TEST_F()
    313  ShapeTree<bool> indices_to_copy(constant->shape(), /*init_value=*/false);  in TEST_F()
    334  ShapeTree<bool> indices_to_copy(tuple->shape(), /*init_value=*/true);  in TEST_F()
    335  ShapeTree<HloInstruction*> copies_added(tuple->shape(),  in TEST_F()
    351  ShapeTree<bool> indices_to_copy(tuple->shape(), /*init_value=*/false);  in TEST_F()
    352  ShapeTree<HloInstruction*> copies_added(tuple->shape(),  in TEST_F()
    368  ShapeTree<bool> indices_to_copy(tuple->shape(), /*init_value=*/false);  in TEST_F()
    370  ShapeTree<HloInstruction*> copies_added(tuple->shape(),  in TEST_F()

executable.h
    104  std::vector<ShapeTree<xla::MaybeOwningDeviceMemory>> arguments,  in ExecuteOnStream()
    112  std::vector<ShapeTree<xla::MaybeOwningDeviceMemory>> arguments) {  in ExecuteAsyncOnStream()

hlo_value.h
    240  class InstructionValueSet : public ShapeTree<HloValueSet> {
    242  InstructionValueSet(const Shape& shape) : ShapeTree<HloValueSet>(shape) {}  in InstructionValueSet()

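hlo_value.h also shows ShapeTree used as a base class: InstructionValueSet is
simply a ShapeTree<HloValueSet> constructed from the instruction's shape. A
sketch of that pattern with a hypothetical payload type (LivenessSet is for
illustration only and is not in the source):

// Sketch of subclassing ShapeTree the way InstructionValueSet does above.
#include "tensorflow/compiler/xla/shape_tree.h"

struct LivenessSet {
  bool live = false;  // hypothetical per-index payload
};

// One LivenessSet per ShapeIndex of the instruction's shape, mirroring
// "class InstructionValueSet : public ShapeTree<HloValueSet>".
class InstructionLivenessSet : public xla::ShapeTree<LivenessSet> {
 public:
  explicit InstructionLivenessSet(const xla::Shape& shape)
      : xla::ShapeTree<LivenessSet>(shape) {}
};
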
hlo_liveness_analysis.h
    42   std::unordered_map<const HloInstruction*, ShapeTree<bool>>;

/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
gpu_transfer_manager.cc
    57   ShapeTree<InfeedBuffer> buffer_tree(shape);  in TransferLiteralToInfeed()
    75   se::StreamExecutor* executor, ShapeTree<InfeedBuffer> buffers) {  in EnqueueBuffersToInfeed()
    121  ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>>* shape_tree) {  in ShapeTreeToLiteral()
    125  ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>>* shape_tree,  in ShapeTreeToLiteral()
    150  ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>> outfeed_buffers(  in TransferLiteralFromOutfeed()

outfeed_thunk.h
    35   OutfeedThunk(ShapeTree<BufferAllocation::Slice> outfeed_slices,
    46   const ShapeTree<BufferAllocation::Slice> outfeed_slices_;

infeed_thunk.h
    37   InfeedThunk(const ShapeTree<BufferAllocation::Slice>& infeed_slices,
    48   const ShapeTree<BufferAllocation::Slice> infeed_slices_;

outfeed_thunk.cc
    26   OutfeedThunk::OutfeedThunk(ShapeTree<BufferAllocation::Slice> outfeed_slices,  in OutfeedThunk()
    38   ShapeTree<std::unique_ptr<OutfeedBuffer>>* outfeed_buffers =  in ExecuteOnStream()

infeed_thunk.cc
    26   const ShapeTree<BufferAllocation::Slice>& infeed_slices,  in InfeedThunk()
    36   ShapeTree<InfeedBuffer> infeed_buffers =  in ExecuteOnStream()

outfeed_manager.h
    58   using OutfeedManager = XfeedQueue<ShapeTree<std::unique_ptr<OutfeedBuffer>>*>;

gpu_transfer_manager.h
    56   ShapeTree<InfeedBuffer> buffers);

infeed_manager.h
    64   class InfeedManager : public XfeedQueue<ShapeTree<InfeedBuffer>> {

hlo_to_ir_bindings.cc
    186  InsertOrDie(&base_ptrs_, &hlo, ShapeTree<llvm::Value*>(hlo_shape, nullptr));  in BindHloToIrValue()
    281  const ShapeTree<llvm::Value*>& shape_tree = it->second;  in ToString()

/external/tensorflow/tensorflow/compiler/xrt/ |
xrt_state.h
    129  const xla::ShapeTree<ExpandedTupleInput>& elements,
    189  xla::ShapeTree<xla::MaybeOwningDeviceMemory> ToDeviceMemoryTree(
    212  const xla::ShapeTree<ExpandedTupleInput>& elements, int device_ordinal,
    227  xla::ShapeTree<XRTBufferAllocation*> buffers_;

xrt_state.cc
    340  const xla::ShapeTree<ExpandedTupleInput>& elements, int device_ordinal,  in ExpandTreeOfTuples()
    382  const xla::ShapeTree<ExpandedTupleInput>& elements,  in MakeTuple()
    540  xla::ShapeTree<xla::MaybeOwningDeviceMemory>
    543  xla::ShapeTree<xla::MaybeOwningDeviceMemory> shaped_tree(on_device_shape());  in ToDeviceMemoryTree()

/external/tensorflow/tensorflow/compiler/xla/client/ |
sharding_builder.h
    54   OpSharding Tuple(const ShapeTree<OpSharding>& shardings);