/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_tree_test.cc |
      54   ShapeTree<int> int_tree;                                    in TEST_F()
      57   ShapeTree<bool> bool_tree;                                  in TEST_F()
      63   ShapeTree<int> int_tree(shape);                             in TestShapeConstructor()
      71   ShapeTree<bool> bool_tree(shape);                           in TestShapeConstructor()
      89   ShapeTree<int> tree(shape, 42);                             in TestInitValueConstructor()
     121   ShapeTree<int> shape_tree{ShapeUtil::MakeTupleShape({})};   in TEST_F()
     126   ShapeTree<int> shape_tree{array_shape_};                    in TEST_F()
     135   ShapeTree<int> copy{shape_tree};                            in TEST_F()
     149   ShapeTree<int> shape_tree{tuple_shape_};                    in TEST_F()
     168   ShapeTree<int> copy{shape_tree};                            in TEST_F()
     [all …]
|
D | shape_tree.h |
      96   class ShapeTree {
     102     ShapeTree() : ShapeTree(ShapeUtil::MakeNil()) {}          in ShapeTree()
     110     explicit ShapeTree(Shape shape);
     111     explicit ShapeTree(const Shape* shape);
     112     explicit ShapeTree(const std::shared_ptr<Shape>& shape);
     115     ShapeTree(Shape shape, const T& init_value);
     116     ShapeTree(const Shape* shape, const T& init_value);
     117     ShapeTree(const std::shared_ptr<Shape>& shape, const T& init_value);
     144     ShapeTree(const ShapeTree&) = default;
     145     ShapeTree& operator=(const ShapeTree&) = default;
     [all …]
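
The constructors indexed above make the ShapeTree contract visible: one T per subshape of an xla::Shape, addressed by ShapeIndex. The following sketch is illustrative only (the shapes and values are invented, not taken from the files above), but it uses only API that appears in shape_tree.h and shape_util.h:

    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    void ShapeTreeSketch() {
      // A tuple shape: (f32[2,2], (f32[3], pred[])).
      xla::Shape leaf = xla::ShapeUtil::MakeShape(xla::F32, {2, 2});
      xla::Shape inner = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {3}),
           xla::ShapeUtil::MakeShape(xla::PRED, {})});
      xla::Shape tuple = xla::ShapeUtil::MakeTupleShape({leaf, inner});

      // Every subshape (the root and interior tuples included) receives the
      // init value; this is the (shape, init_value) form at lines 115-117.
      xla::ShapeTree<int> tree(tuple, /*init_value=*/0);

      // Elements are addressed by ShapeIndex: {} is the root, {1, 0} is the
      // f32[3] leaf inside the nested tuple.
      *tree.mutable_element({1, 0}) = 42;

      // Iteration visits (ShapeIndex, value) pairs.
      for (const auto& pair : tree) {
        const xla::ShapeIndex& index = pair.first;
        int value = pair.second;
        (void)index;  // silence unused-variable warnings in this sketch
        (void)value;
      }
    }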
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_replication_analysis.cc |
      43   const absl::flat_hash_map<const HloInstruction*, ShapeTree<bool>>&       in DetermineHloInstructionIsReplicated()
     171   auto assign_or_combine_shapetree = [&](ShapeTree<bool>&& to_combine,     in ComputeHloReplicationOnComputation()
     196   return assign_or_combine_shapetree(ShapeTree<bool>(source_it->second),   in ComputeHloReplicationOnComputation()
     268   ShapeTree<bool>(inst->shape(), false), inst);                            in ComputeHloReplicationOnComputation()
     281   ShapeTree<bool>(inst->shape(), false), inst);                            in ComputeHloReplicationOnComputation()
     287   ShapeTree<bool> shape_tree(inst->shape(), true);                         in ComputeHloReplicationOnComputation()
     293   ShapeTree<bool> shape_tree(inst->shape(), true);                         in ComputeHloReplicationOnComputation()
     298   ShapeTree<bool> shape_tree(inst->shape(), false);                        in ComputeHloReplicationOnComputation()
     310   ShapeTree<bool>(inst->shape(), false), inst);                            in ComputeHloReplicationOnComputation()
     312   ShapeTree<bool> shape_tree(inst->shape(), true);                         in ComputeHloReplicationOnComputation()
     [all …]
|
D | shaped_buffer.h |
      92   void set_buffers(ShapeTree<se::DeviceMemoryBase> buffers) {                  in set_buffers()
     116   const ShapeTree<se::DeviceMemoryBase>& buffers() const { return buffers_; }  in buffers()
     117   ShapeTree<se::DeviceMemoryBase>& buffers() { return buffers_; }              in buffers()
     136   ShapeTree<se::DeviceMemoryBase> buffers_;
|
D | copy_insertion.cc |
     130   const ShapeTree<bool>& indices_to_copy) {                                       in DeepCopyAndAddControlEdges()
     135   ShapeTree<HloInstruction*> from_copy_tree(from->shape(),                        in DeepCopyAndAddControlEdges()
     141   ShapeTree<HloInstruction*> to_copy_tree(to->shape(), /*init_value=*/nullptr);   in DeepCopyAndAddControlEdges()
     170   ShapeTree<bool>* indices_to_copy) {                                             in IndicesToCopyForWhile()
     198   ShapeTree<bool>* indices_to_copy) {                                             in IndicesToCopyForConditional()
     284   ShapeTree<bool> indices_to_copy(xla_while->shape());                            in AddCopiesForWhile()
     356   ShapeTree<bool> output_indices_to_copy(root->shape());                          in AddCopiesForAliasedInputOutputs()
     357   std::vector<absl::optional<ShapeTree<HloInstruction*>>> copied_parameters(     in AddCopiesForAliasedInputOutputs()
     362   ShapeTree<bool> param_indices_to_copy(param->shape());                          in AddCopiesForAliasedInputOutputs()
     386   ShapeTree<HloInstruction*> param_copy_tree(param->shape(),                      in AddCopiesForAliasedInputOutputs()
     [all …]
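
copy_insertion.cc (and hlo_replication_analysis.cc above) show a recurring ShapeTree<bool> "index mask" idiom: one flag per subshape of an instruction, usually initialized uniformly and then toggled per index. A hedged sketch of that idiom; MakeLeafMask is an invented helper name, not a function from these files:

    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    xla::ShapeTree<bool> MakeLeafMask(const xla::Shape& shape) {
      // Start with every index false, as the indices_to_copy trees above do.
      xla::ShapeTree<bool> indices_to_copy(shape, /*init_value=*/false);
      // Flip only the leaf (non-tuple) indices; interior tuple nodes stay false.
      indices_to_copy.ForEachMutableElement(
          [&](const xla::ShapeIndex& index, bool* flag) {
            if (indices_to_copy.IsLeaf(index)) *flag = true;
          });
      return indices_to_copy;
    }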
|
D | hlo_sharding_metadata.cc |
     172   StatusOr<ShapeTree<HloSharding>> GetShardingTreeFromUser(                    in GetShardingTreeFromUser()
     206   ShapeTree<HloSharding>* lhs_tree, ShapeTree<HloSharding>::iterator lhs_it,   in AssignTreeSharding()
     207   const ShapeTree<HloSharding>& rhs_tree) {                                    in AssignTreeSharding()
     252   ShapeTree<HloSharding> sharding_tree(                                        in ApplyShardingFromUsers()
     267   TF_ASSIGN_OR_RETURN(ShapeTree<HloSharding> user_sharding_tree,               in ApplyShardingFromUsers()
     275   ShapeTree<HloSharding>::iterator sharding_tree_begin =                       in ApplyShardingFromUsers()
|
D | hlo_liveness_analysis.cc |
      53   void ForEachLiveIndex(const ShapeTree<bool>& index_tree,                       in ForEachLiveIndex()
     125   const ShapeTree<bool>& index_tree = FindOrDie(*live_index_map, instruction);   in PropagateLivenessThroughTuple()
     155   const ShapeTree<bool>& index_tree = FindOrDie(*live_index_map, instruction);   in PropagateLivenessThroughGTE()
     174   const ShapeTree<bool>& index_tree = FindOrDie(*live_index_map, instruction);   in PropagateLivenessThroughWhile()
     205   const ShapeTree<bool>& index_tree =                                            in PropagateLivenessToParameterCallers()
|
D | executable.h |
      72   explicit ExecutionInput(ShapeTree<MaybeOwningDeviceMemory> buffers)              in ExecutionInput()
      77   ExecutionInput(ShapeTree<MaybeOwningDeviceMemory> buffers,                       in ExecutionInput()
     119   const ShapeTree<MaybeOwningDeviceMemory>& Buffers() const { return buffers_; }   in Buffers()
     121   ShapeTree<MaybeOwningDeviceMemory>* MutableBuffers() { return &buffers_; }       in MutableBuffers()
     138   ShapeTree<MaybeOwningDeviceMemory> buffers_;
|
D | hlo_sharding_test.cc |
      87   ShapeTree<HloSharding> shape_tree =                                            in TEST_F()
     177   ShapeTree<HloSharding> shape_tree =                                            in TEST_F()
     226   ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),               in TEST_F()
     234   ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),               in TEST_F()
     242   ShapeTree<HloSharding> shape_tree1(                                            in TEST_F()
     246   ShapeTree<HloSharding> shape_tree2(                                            in TEST_F()
     256   ShapeTree<HloSharding> shape_tree1(                                            in TEST_F()
     260   ShapeTree<HloSharding> shape_tree2(                                            in TEST_F()
     424   ShapeTree<HloSharding> sharding_tree(tuple_shape, HloSharding::Replicate());   in TEST_P()
|
D | hlo_sharding.h |
      89   static HloSharding Tuple(const ShapeTree<HloSharding>& sub_shardings);
     211   StatusOr<ShapeTree<HloSharding>> AsShapeTree(const Shape& shape) const;
     212   ShapeTree<HloSharding> GetAsShapeTree(const Shape& shape) const {   in GetAsShapeTree()
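
These declarations define the round trip between a single HloSharding and its per-subshape expansion. A minimal sketch of that round trip with illustrative shapes; Tuple and GetAsShapeTree are the declared API, everything else below is example scaffolding:

    #include "tensorflow/compiler/xla/service/hlo_sharding.h"
    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    void ShardingTreeSketch() {
      xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {4}),
           xla::ShapeUtil::MakeShape(xla::F32, {4})});

      // Every subshape starts replicated; pin the second leaf to device 1.
      xla::ShapeTree<xla::HloSharding> tree(tuple_shape,
                                            xla::HloSharding::Replicate());
      *tree.mutable_element({1}) = xla::HloSharding::AssignDevice(1);

      // Tuple() (line 89) packs the leaf shardings into one tuple sharding;
      // GetAsShapeTree() (line 212) expands it back out.
      xla::HloSharding sharding = xla::HloSharding::Tuple(tree);
      xla::ShapeTree<xla::HloSharding> round_trip =
          sharding.GetAsShapeTree(tuple_shape);
      (void)round_trip;
    }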
|
D | hlo_computation_test.cc |
     304   ShapeTree<bool> indices_to_copy(constant->shape(), /*init_value=*/true);    in TEST_F()
     312   ShapeTree<bool> indices_to_copy(constant->shape(), /*init_value=*/false);   in TEST_F()
     333   ShapeTree<bool> indices_to_copy(tuple->shape(), /*init_value=*/true);       in TEST_F()
     334   ShapeTree<HloInstruction*> copies_added(tuple->shape(),                     in TEST_F()
     350   ShapeTree<bool> indices_to_copy(tuple->shape(), /*init_value=*/false);      in TEST_F()
     351   ShapeTree<HloInstruction*> copies_added(tuple->shape(),                     in TEST_F()
     367   ShapeTree<bool> indices_to_copy(tuple->shape(), /*init_value=*/false);      in TEST_F()
     369   ShapeTree<HloInstruction*> copies_added(tuple->shape(),                     in TEST_F()
|
D | hlo_liveness_analysis.h |
      42   std::unordered_map<const HloInstruction*, ShapeTree<bool>>;
|
D | shaped_buffer.cc |
      75   TF_ASSIGN_OR_RETURN(ShapeTree<se::DeviceMemoryBase> sub_buffers,   in SubShapedBuffer()
     153   buffers_ = ShapeTree<se::DeviceMemoryBase>();                      in release()
|
D | hlo_sharding.cc |
     108   HloSharding HloSharding::Tuple(const ShapeTree<HloSharding>& sub_shardings) {   in Tuple()
     324   StatusOr<ShapeTree<HloSharding>> HloSharding::AsShapeTree(                      in AsShapeTree()
     327   ShapeTree<HloSharding> result(shape, HloSharding::Replicate());                 in AsShapeTree()
     340   return ShapeTree<HloSharding>(shape, *this);                                    in AsShapeTree()
     349   return Tuple(ShapeTree<HloSharding>(shape, *this));                             in GetTupleSharding()
     388   ShapeTree<HloSharding> shape_tree = GetAsShapeTree(shape);                      in ValidateTuple()
|
D | hlo_value.h |
     252   class InstructionValueSet : public ShapeTree<HloValueSet> {
     255   : ShapeTree<HloValueSet>(shape) {}                            in InstructionValueSet()
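
InstructionValueSet is a ShapeTree subclass, so dataflow analysis stores one HloValueSet per subshape of each instruction. A stub of the same inheritance pattern, with int standing in for HloValueSet since only the structure is being illustrated:

    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical: a per-subshape container that inherits ShapeTree's
    // storage and indexing, as InstructionValueSet does at line 252.
    class PerSubshapeSet : public xla::ShapeTree<int> {
     public:
      explicit PerSubshapeSet(const xla::Shape& shape)
          : xla::ShapeTree<int>(shape) {}
    };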
|
D | hlo_replication_analysis.h |
      88   absl::flat_hash_map<const HloInstruction*, ShapeTree<bool>> hlo_replication_;
|
/external/tensorflow/tensorflow/compiler/xla/pjrt/ |
D | tracked_device_buffer.cc |
     101   ShapeTree<se::DeviceMemoryBase>::iterator iterator =                in FromScopedShapedBuffer()
     123   ShapeTree<se::DeviceMemoryBase>::iterator iterator =                in AsShapedBuffer()
     138   ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,            in AddToInputAsImmutable()
     139   const ShapeTree<MaybeOwningDeviceMemory>::iterator& end) const {   in AddToInputAsImmutable()
     149   ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,            in AddToInputAsDonated()
     150   const ShapeTree<MaybeOwningDeviceMemory>::iterator& end,           in AddToInputAsDonated()
|
D | tracked_device_buffer.h |
     149   ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
     150   const ShapeTree<MaybeOwningDeviceMemory>::iterator& end) const;
     161   ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
     162   const ShapeTree<MaybeOwningDeviceMemory>::iterator& end,
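
The (iterator*, const iterator&) parameter pair above implies a cursor-walk protocol: the caller owns one iterator into an ExecutionInput's ShapeTree and each call consumes entries from it. A hypothetical helper in the same style; FillNext and the int payload are invented for illustration:

    #include "tensorflow/compiler/xla/shape_tree.h"

    void FillNext(xla::ShapeTree<int>::iterator* iterator,
                  const xla::ShapeTree<int>::iterator& end, int value) {
      if (*iterator == end) return;   // caller walked past the last subshape
      (*iterator)->second = value;    // entries are (ShapeIndex, value) pairs
      ++*iterator;                    // advance the caller's shared cursor
    }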
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_transfer_manager.cc |
      57   ShapeTree<InfeedBuffer> buffer_tree(literal_shape);                 in TransferLiteralToInfeed()
      70   se::StreamExecutor* executor, ShapeTree<InfeedBuffer> buffers) {    in EnqueueBuffersToInfeed()
     118   ShapeTree<std::unique_ptr<gpu::OutfeedBuffer>> outfeed_buffers(     in TransferLiteralFromOutfeed()
|
D | gpu_transfer_manager.h |
      55   ShapeTree<InfeedBuffer> buffers);
|
D | outfeed_manager.h |
      58   using OutfeedManager = XfeedQueue<ShapeTree<std::unique_ptr<OutfeedBuffer>>*>;
|
/external/tensorflow/tensorflow/compiler/xrt/ |
D | xrt_state.h |
     141   const xla::ShapeTree<ExpandedTupleInput>& elements,
     268   const xla::ShapeTree<ExpandedTupleInput>& elements, int device_ordinal,
     287   xla::ShapeTree<XRTBufferAllocation*> buffers_;
|
/external/tensorflow/tensorflow/core/tpu/kernels/ |
D | tpu_compile_op_support.h |
     115   xla::ShapeTree<xla::HloSharding> GetSubtree(
     116   const xla::ShapeTree<xla::HloSharding>& tuple_shape_tree,
|
D | tpu_compile_op_support.cc |
      39   using ::xla::ShapeTree;
     149   ShapeTree<HloSharding> GetSubtree(                                            in GetSubtree()
     150   const ShapeTree<HloSharding>& tuple_shape_tree, int element_index) {          in GetSubtree()
     151   ShapeTree<HloSharding> element_shape_tree(                                    in GetSubtree()
     165   ShapeTree<HloSharding> tuple_shape_tree = sharding.GetAsShapeTree(shape);     in GetPerDeviceShape()
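
GetSubtree extracts the sharding subtree for one tuple element. The file's exact implementation is truncated above, but the public ShapeTree API is enough to express the operation; a hedged reconstruction using CopySubtreeFrom, not the file's verbatim code:

    #include "tensorflow/compiler/xla/service/hlo_sharding.h"
    #include "tensorflow/compiler/xla/shape_tree.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    xla::ShapeTree<xla::HloSharding> GetSubtreeSketch(
        const xla::ShapeTree<xla::HloSharding>& tuple_shape_tree,
        int element_index) {
      const xla::Shape& element_shape = xla::ShapeUtil::GetTupleElementShape(
          tuple_shape_tree.shape(), element_index);
      xla::ShapeTree<xla::HloSharding> element_shape_tree(
          element_shape, xla::HloSharding::Replicate());
      // Copy everything rooted at {element_index} in the source onto the
      // root ({}) of the destination tree.
      element_shape_tree.CopySubtreeFrom(tuple_shape_tree,
                                         /*source_base_index=*/{element_index},
                                         /*target_base_index=*/{});
      return element_shape_tree;
    }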
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | sharding_builder.h |
      57   OpSharding Tuple(const ShapeTree<OpSharding>& shardings);
|