
Searched refs:hlo (Results 1 – 25 of 305) sorted by relevance


/external/tensorflow/tensorflow/compiler/xla/service/
dfs_hlo_visitor.h
74 virtual Status HandleElementwiseUnary(HloInstructionPtr hlo);
75 virtual Status HandleElementwiseBinary(HloInstructionPtr hlo);
77 virtual Status HandleClamp(HloInstructionPtr hlo) = 0;
78 virtual Status HandleSelect(HloInstructionPtr hlo) = 0;
79 virtual Status HandleTupleSelect(HloInstructionPtr hlo) = 0;
80 virtual Status HandleMaximum(HloInstructionPtr hlo) {
81 return HandleElementwiseBinary(hlo);
83 virtual Status HandleMinimum(HloInstructionPtr hlo) {
84 return HandleElementwiseBinary(hlo);
86 virtual Status HandleConcatenate(HloInstructionPtr hlo) = 0;
[all …]
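
These Handle* methods form XLA's depth-first HLO visitor interface: every opcode dispatches to a handler, and thin handlers such as HandleMaximum simply forward to the shared elementwise-binary case. A minimal sketch of how the interface is consumed (the class name and counting logic are hypothetical; unhandled opcodes fall through to a no-op DefaultAction):

    #include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"

    namespace xla {
    // Hypothetical visitor: counts elementwise binary ops (add, maximum, ...)
    // and leaves every other opcode to the no-op default.
    class CountElementwiseBinary : public DfsHloVisitorWithDefault {
     public:
      Status DefaultAction(HloInstruction* hlo) override { return Status::OK(); }
      Status HandleElementwiseBinary(HloInstruction* hlo) override {
        ++count_;
        return Status::OK();
      }
      int64 count() const { return count_; }

     private:
      int64 count_ = 0;
    };
    }  // namespace xla

Running it is one call: computation->Accept(&visitor) walks the graph operands-first and fires these handlers.
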
bfloat16_normalization.cc
46 Status DefaultAction(HloInstruction* hlo) override;
47 Status Preprocess(HloInstruction* hlo) override;
52 Status HandleInstruction(HloInstruction* hlo);
56 Status HandleMultipleOutputs(HloInstruction* hlo);
60 StatusOr<HloInstruction*> ConvertType(HloInstruction* hlo, PrimitiveType from,
66 Status InsertConvertAfterOutput(HloInstruction* hlo, PrimitiveType from,
73 Status ChangeOutputTypeThenInsertConvertBack(HloInstruction* hlo,
80 Status InsertConvertBeforeOperand(HloInstruction* hlo, int64 operand_idx,
87 HloInstruction* hlo, absl::Span<HloComputation* const> bf16_called_comps);
118 HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
[all …]
dynamic_dimension_inference.cc
79 Status DefaultAction(HloInstruction* hlo) override;
91 Status HandleParameter(HloInstruction* hlo) override;
93 Status HandleReduce(HloInstruction* hlo) override;
95 Status HandleDot(HloInstruction* hlo) override;
97 Status HandleTuple(HloInstruction* hlo) override;
99 Status HandleTranspose(HloInstruction* hlo) override;
101 Status HandleDynamicReshape(HloInstruction* hlo) override;
103 Status HandleReshape(HloInstruction* hlo) override;
105 Status HandleSort(HloInstruction* hlo) override;
107 Status HandlePad(HloInstruction* hlo) override;
[all …]
bfloat16_conversion_folding.cc
40 Status DefaultAction(HloInstruction* hlo) override;
57 Status TryFoldBF16Conversions(HloInstruction* hlo);
62 Status FoldOutputConversions(HloInstruction* hlo);
67 Status FoldOperandConversion(HloInstruction* hlo, int64 operand_index);
76 HloInstruction* hlo) {
77 std::vector<HloInstruction*> materialized_users = hlo->users();
78 hlo->mutable_shape()->set_element_type(BF16);
79 bfloat16_conversion_folding_->UpdateLayout(hlo->mutable_shape());
82 TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(hlo));
89 HloInstruction* hlo, int64 operand_index) {
[all …]
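
The output-side fold above retypes the producer to BF16 in place and splices its consuming converts out of the graph. The operand-side fold declared at line 67 is the mirror image; a sketch of what it amounts to, assuming operand replacement tolerates the F32/BF16 element-type change for otherwise compatible shapes:

    // Sketch: if the operand at operand_index is convert(x: bf16 -> f32),
    // feed x to the instruction directly and let the convert become dead.
    Status FoldOperandConversion(HloInstruction* hlo, int64 operand_index) {
      HloInstruction* convert = hlo->mutable_operand(operand_index);
      CHECK_EQ(convert->opcode(), HloOpcode::kConvert);
      return hlo->ReplaceOperandWith(operand_index,
                                     convert->mutable_operand(0));
    }
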
bfloat16_propagation.cc
235 bool BFloat16Propagation::AllUsersConsumeBF16(const HloInstruction& hlo,
238 const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);
243 auto& value_set = dataflow_->GetValueSet(&hlo, index);
369 void BFloat16Propagation::DetermineInstructionPrecision(HloInstruction* hlo,
377 [this, hlo, &postpone_processing_called_computations] {
379 if (hlo->opcode() == HloOpcode::kFusion) {
380 DetermineFusionComputationPrecision(hlo);
381 } else if (hlo->opcode() == HloOpcode::kWhile) {
382 DetermineWhileComputationsPrecision(hlo);
383 } else if (hlo->opcode() == HloOpcode::kConditional) {
[all …]
hlo_element_type_converter.cc
38 HloInstruction* ToElementType(HloInstruction* hlo, PrimitiveType type) {
39 if (hlo->shape().element_type() != type) {
40 Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
41 hlo = hlo->parent()->AddInstruction(
42 HloInstruction::CreateConvert(shape, hlo));
44 CHECK_EQ(hlo->shape().element_type(), type);
45 return hlo;
48 bool HasOperandType(HloInstruction* hlo, PrimitiveType type) {
49 for (HloInstruction* operand : hlo->operands()) {
85 HloInstruction* ConvertTupleElements(HloInstruction* hlo,
[all …]
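
ToElementType is the whole conversion idiom in one helper: rewrite the shape's element type, then add a kConvert to the owning computation (it returns its argument unchanged when the types already match). A hedged usage sketch, with hypothetical variable names, that widens every operand of an instruction to F32:

    // Sketch: collect F32-typed versions of all operands of `hlo`,
    // inserting converts only where the element type actually differs.
    std::vector<HloInstruction*> f32_operands;
    for (HloInstruction* operand : hlo->operands()) {
      f32_operands.push_back(ToElementType(operand, F32));
    }
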
hlo_module_group_metadata.cc
72 const auto visitor = [this](HloInstruction* hlo) -> Status {
76 const TrackedInstruction* tracked = GetTrackedInstruction(hlo->parent());
81 if (IsChannelInstruction(hlo) || hlo->IsCrossModuleAllReduce()) {
83 if (IsChannelInstruction(hlo)) {
84 peers.push_back(PeerComputation(hlo));
85 } else if (hlo->IsCrossModuleAllReduce()) {
86 for (HloInstruction* instr : GetAllReduceGroup(*hlo->channel_id())) {
87 if (instr == hlo) {
106 tracked_instructions_comms_[tracked->instruction()].push_back(hlo);
108 } else if (IsCompanionInstruction(hlo)) {
[all …]
dfs_hlo_visitor_with_default_test.cc
40 Status DefaultAction(HloInstruction* hlo) override {
43 TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 2))
44 << hlo->ToString();
45 TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 1))
46 << hlo->ToString();
50 Status HandleElementwiseBinary(HloInstruction* hlo) override {
52 TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 2)
53 << hlo->ToString();
56 Status HandleElementwiseUnary(HloInstruction* hlo) override {
58 TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 1)
[all …]
hlo_replication_analysis.cc
41 const HloInstruction* hlo, const ShapeIndex& index,
58 if (hlo->opcode() == HloOpcode::kAllReduce ||
59 hlo->opcode() == HloOpcode::kAllGather) {
62 if (all_operands_replicated(hlo)) {
65 if (!hlo->channel_id().has_value()) {
72 return hlo->replica_groups().empty() || hlo->replica_groups().size() == 1;
75 if (hlo->opcode() == HloOpcode::kAllReduce) {
76 global_id = Cast<HloAllReduceInstruction>(hlo)->use_global_device_ids();
78 global_id = Cast<HloAllGatherInstruction>(hlo)->use_global_device_ids();
84 hlo->GetModule()->config().num_partitions();
[all …]
bfloat16_support.cc
23 bool BFloat16Support::SupportsBF16Operand(const HloInstruction& hlo,
25 switch (hlo.opcode()) {
36 return hlo.operand(0)->shape().element_type() == BF16;
43 bool BFloat16Support::SupportsBF16Output(const HloInstruction& hlo) const {
44 switch (hlo.opcode()) {
54 return hlo.shape().element_type() == BF16;
61 bool BFloat16Support::SupportsMixedPrecisions(const HloInstruction& hlo) const {
62 switch (hlo.opcode()) {
79 const HloInstruction& hlo, int64 operand_index) {
80 switch (hlo.opcode()) {
[all …]
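
Backends describe their BF16 capabilities to the bfloat16 passes by subclassing BFloat16Support and overriding these queries. A minimal hypothetical policy, assuming the three methods are virtual as the definitions here suggest: BF16 everywhere except dot, and no mixed precision:

    // Hypothetical backend policy: BF16 operands and outputs on every op
    // except kDot; never allow mixed-precision instructions.
    class NoBF16DotSupport : public BFloat16Support {
     public:
      bool SupportsBF16Operand(const HloInstruction& hlo,
                               int64 operand_index) const override {
        return hlo.opcode() != HloOpcode::kDot;
      }
      bool SupportsBF16Output(const HloInstruction& hlo) const override {
        return hlo.opcode() != HloOpcode::kDot;
      }
      bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
        return false;
      }
    };
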
hlo_cost_analysis.h
53 Status HandleElementwiseUnary(const HloInstruction* hlo) override;
54 Status HandleElementwiseBinary(const HloInstruction* hlo) override;
59 Status HandleSelect(const HloInstruction* hlo) override;
60 Status HandleTupleSelect(const HloInstruction* hlo) override;
63 Status HandleReducePrecision(const HloInstruction* hlo) override;
77 Status HandleTriangularSolve(const HloInstruction* hlo) override;
78 Status HandleCholesky(const HloInstruction* hlo) override;
79 Status HandleAllGather(const HloInstruction* hlo) override;
81 Status HandleAllToAll(const HloInstruction* hlo) override;
82 Status HandleCollectivePermute(const HloInstruction* hlo) override;
[all …]
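
HloCostAnalysis is itself a DFS visitor, so each of these handlers records per-instruction flop and byte counts as the graph is walked. A short usage sketch, assuming a caller-supplied shape-size function (bytes per buffer) and an existing computation:

    // Sketch: estimate flops and memory traffic for one computation.
    auto shape_size = [](const Shape& shape) {
      return ShapeUtil::ByteSizeOf(shape, /*pointer_size=*/8);
    };
    HloCostAnalysis analysis(shape_size);
    TF_RETURN_IF_ERROR(computation->Accept(&analysis));
    VLOG(1) << "flops: " << analysis.flop_count()
            << ", bytes accessed: " << analysis.bytes_accessed();
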
dfs_hlo_visitor_with_default.h
55 Status HandleElementwiseUnary(HloInstructionPtr hlo) override {
56 return DefaultAction(hlo);
58 Status HandleElementwiseBinary(HloInstructionPtr hlo) override {
59 return DefaultAction(hlo);
62 Status HandleBatchNormTraining(HloInstructionPtr hlo) override {
63 return DefaultAction(hlo);
66 Status HandleBatchNormInference(HloInstructionPtr hlo) override {
67 return DefaultAction(hlo);
70 Status HandleBatchNormGrad(HloInstructionPtr hlo) override {
71 return DefaultAction(hlo);
[all …]
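
Every handler in the with-default variant collapses into DefaultAction, which is what makes lambda-driven traversal cheap. The same header (in this vintage of XLA) provides a FunctionVisitor wrapper for exactly that; a hedged sketch, assuming its constructor takes a Status-returning std::function:

    // Sketch: count every instruction in a computation with a lambda.
    int64 num_instructions = 0;
    FunctionVisitor visitor([&](HloInstruction* hlo) {
      ++num_instructions;
      return Status::OK();
    });
    TF_RETURN_IF_ERROR(computation->Accept(&visitor));
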
hlo_cost_analysis.cc
47 Status HloCostAnalysis::Preprocess(const HloInstruction* hlo) {
57 float bytes_accessed = GetShapeSize(hlo->shape());
58 SetOutputBytesAccessed(GetShapeSize(hlo->shape()));
59 for (int64 i = 0; i < hlo->operand_count(); ++i) {
60 const HloInstruction* operand = hlo->operand(i);
69 Status HloCostAnalysis::Postprocess(const HloInstruction* hlo) {
85 TF_RET_CHECK(hlo_properties_.emplace(hlo, current_properties_).second);
128 const HloInstruction& hlo, const string& key,
130 auto it = hlo_to_properties.find(&hlo);
146 const HloInstruction* hlo) const {
[all …]
convert_operand_folding.cc
21 bool IsUpcastConvert(const HloInstruction* hlo) {
22 return hlo->opcode() == HloOpcode::kConvert &&
23 ShapeUtil::ElementIsFloating(hlo->shape()) ==
24 ShapeUtil::ElementIsFloating(hlo->operand(0)->shape()) &&
25 ShapeUtil::ElementIsSigned(hlo->shape()) ==
26 ShapeUtil::ElementIsSigned(hlo->operand(0)->shape()) &&
27 ShapeUtil::HigherPrecisionElementType(hlo->operand(0)->shape(),
28 hlo->shape()) ==
29 hlo->shape().element_type();
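
In words: a convert counts as an upcast only when it preserves both floatness and signedness and its result is the wider of the two element types, so no information can be lost. A few assumed classifications for intuition:

    // Assumed classifications under the three conditions above:
    //   convert(s8  -> s32): int->int, signed->signed, s32 wider   => upcast
    //   convert(s8  -> f32): floatness differs                     => not
    //   convert(s32 -> s8):  result is the narrower type           => not
    //   convert(u8  -> s32): signedness differs                    => not
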
/external/tensorflow/tensorflow/compiler/xla/service/spmd/
spmd_partitioner.cc
82 void SpmdLogger::RegisterLogEntry(HloInstruction* hlo,
84 string report = hlo->ToString();
104 [](const HloInstruction* hlo) {
105 return !hlo->has_sharding() ||
106 hlo->sharding().IsReplicated();
112 module, [](const HloInstruction* hlo) { return true; },
124 module, [](const HloInstruction* hlo) { return true; },
140 for (auto hlo : computation->instructions()) {
141 if (hlo->shape().IsTuple() ||
142 ShapeUtil::IsEffectiveScalar(hlo->shape())) {
[all …]
fft_handler.cc
53 HloInstruction* hlo, int64 num_partitions, const HloSharding& sharding,
56 int64 size_per_partition = hlo->shape().dimensions().back();
60 return hlo;
77 auto concat = hlo;
80 ExchangeHalo(hlo, left_halo_size_function, right_halo_size_function,
81 hlo->shape().rank() - 1, sharding, collective_ops_creator,
112 HloInstruction* ShuffleWithinEachPartitionUsingOneHot(HloInstruction* hlo,
115 int64 size_per_partition = hlo->shape().dimensions().back();
140 hlo->shape().element_type()),
146 dot_dnums.add_lhs_contracting_dimensions(hlo->shape().rank() - 1);
[all …]
spmd_partitioner.h
72 SpmdBuilder(const std::string& name, HloInstruction* hlo)
74 visiting_hlo_ = hlo;
79 HloInstruction* hlo) {
80 return instructions_.at(hlo);
83 void set_visiting_hlo(HloInstruction* hlo) { visiting_hlo_ = hlo; }
89 const HloInstruction* hlo) {
90 auto it = broadcast_dims_.find(hlo);
163 void RegisterLogEntry(HloInstruction* hlo,
293 PartitionedHlo(HloInstruction* hlo, Shape base_shape, PartitioningState state)
294 : hlo_(hlo), base_shape_(base_shape), state_(std::move(state)) {
[all …]
gather_scatter_handler.cc
84 operand.hlo()->shape().dimensions(dim) - 1, b);
148 ShapeInference::InferGatherShape(replicated_operand.hlo()->shape(),
149 indices.hlo()->shape(), dnums,
152 partitioned_output_shape, {replicated_operand.hlo(), indices.hlo()}));
170 .hlo();
196 pslice_sizes[i] = operand.hlo()->shape().dimensions(i);
200 pshape, operand.hlo(), indices.hlo(), dnums, pslice_sizes,
207 .hlo();
242 indices_min, indices.hlo(), indices_max));
249 output_shape, operand.hlo(), adjusted_indices, dnums,
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
stream_assignment.cc
29 bool StreamAssignment::HasStreamAssigned(const HloInstruction& hlo) const {
30 return hlo_to_stream_number_.contains(&hlo);
33 int StreamAssignment::StreamNumberForHlo(const HloInstruction& hlo) const {
34 return FindOrDie(hlo_to_stream_number_, &hlo);
37 void StreamAssignment::AssignStreamToHlo(const HloInstruction* hlo,
43 InsertOrDie(&hlo_to_stream_number_, hlo, stream_num);
44 VLOG(2) << "Assign stream #" << stream_num << " to " << hlo->ToString();
67 const HloInstruction& hlo, const StreamAssignment& stream_assignment,
70 if (hlo.opcode() == HloOpcode::kParameter ||
71 hlo.opcode() == HloOpcode::kConstant) {
[all …]
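
StreamAssignment is a thin map from instruction to GPU stream number, filled in during scheduling. A hypothetical driver sketch, assuming AssignStreamToHlo's truncated second parameter is the stream number as the InsertOrDie call implies:

    // Sketch: place two independent instructions on separate streams,
    // then query the assignment before emitting thunks.
    StreamAssignment assignment;
    assignment.AssignStreamToHlo(hlo_a, /*stream_num=*/0);
    assignment.AssignStreamToHlo(hlo_b, /*stream_num=*/1);
    if (assignment.HasStreamAssigned(*hlo_a)) {
      VLOG(2) << "hlo_a on stream " << assignment.StreamNumberForHlo(*hlo_a);
    }
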
tree_reduction_rewriter.cc
58 Status HandleReduce(HloInstruction *hlo) override {
59 if (!hlo->shape().IsArray()) {
64 if (!IsReductionFromOrToContiguousDimensions(*hlo)) {
67 return RewriteReduction(hlo);
71 Status RewriteReduction(HloInstruction *hlo) {
73 GetReductionKindAndContiguousComponents(*hlo);
74 VLOG(3) << "Input: " << hlo->ToString();
76 HloInstruction *input = hlo->mutable_operand(0);
77 HloInstruction *initial_value = hlo->mutable_operand(1);
81 bool reduce_batch_dimension = hlo->dimensions().size() > 1;
[all …]
gpu_hlo_schedule.cc
86 for (const HloInstruction* hlo : thunk_launch_order) {
87 predecessor_map->SetReachable(hlo, hlo);
88 if (stream_assignment.HasStreamAssigned(*hlo)) {
92 immediate_preds.insert(immediate_preds.end(), hlo->operands().begin(),
93 hlo->operands().end());
95 hlo->control_predecessors().begin(),
96 hlo->control_predecessors().end());
100 const int stream_no = stream_assignment.StreamNumberForHlo(*hlo);
104 predecessor_map->FastSetReachabilityToUnion(immediate_preds, hlo);
105 last_instruction_per_stream[stream_no] = hlo;
[all …]
/external/tensorflow/tensorflow/compiler/mlir/hlo/
BUILD
38 exports_files(["include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.td"])
40 exports_files(["include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.td"])
45 "include/mlir-hlo/Dialect/mhlo/IR/chlo_ops.td",
46 "include/mlir-hlo/Dialect/mhlo/IR/hlo_ops.td",
47 "include/mlir-hlo/Dialect/mhlo/IR/hlo_ops_base.td",
48 "include/mlir-hlo/Dialect/mhlo/IR/hlo_ops_base_enums.td",
49 "include/mlir-hlo/Dialect/mhlo/IR/hlo_ops_base_structs.td",
50 "include/mlir-hlo/Dialect/mhlo/IR/hlo_utils.td",
51 "include/mlir-hlo/Dialect/mhlo/IR/infer_fusibility_op_interface.td",
52 "include/mlir-hlo/Dialect/mhlo/IR/lhlo_ops.td",
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
cpu_instruction_fusion.cc
27 bool CanBeLoopFused(const HloInstruction& hlo) {
30 return hlo.IsElementwise() ||  //
31 hlo.opcode() == HloOpcode::kBitcast ||
32 hlo.opcode() == HloOpcode::kBroadcast ||
33 hlo.opcode() == HloOpcode::kConcatenate ||
34 hlo.opcode() == HloOpcode::kDynamicSlice ||
35 hlo.opcode() == HloOpcode::kDynamicUpdateSlice ||
36 hlo.opcode() == HloOpcode::kGather ||
37 hlo.opcode() == HloOpcode::kIota || hlo.opcode() == HloOpcode::kPad ||
38 hlo.opcode() == HloOpcode::kReduce ||
[all …]
conv_canonicalization.cc
34 for (HloInstruction* hlo :
36 if (hlo->opcode() == HloOpcode::kConvolution &&
37 !PotentiallyImplementedAsEigenConvolution(*hlo,
40 hlo->convolution_dimension_numbers();
58 HloInstruction* input = hlo->mutable_operand(0);
79 HloInstruction* kernel = hlo->mutable_operand(1);
106 new_conv_dims[0] = hlo->shape().dimensions(output_batch_dim);
110 hlo->shape().dimensions(dnums.output_spatial_dimensions(i));
113 new_conv_dims[num_dims - 1] = hlo->shape().dimensions(output_feature_dim);
115 ShapeUtil::MakeShape(hlo->shape().element_type(), new_conv_dims);
[all …]
/external/tensorflow/tensorflow/compiler/mlir/xla/
BUILD
40 "../hlo/include",
43 "//tensorflow/compiler/mlir/hlo:hlo_ops_td_files",
60 "../hlo/include",
64 "//tensorflow/compiler/mlir/hlo:hlo_ops_td_files",
84 "//tensorflow/compiler/mlir/hlo",
110 "//tensorflow/compiler/mlir/hlo",
111 "//tensorflow/compiler/mlir/hlo:chlo_legalize_to_hlo",
112 "//tensorflow/compiler/mlir/hlo:convert_op_folder",
144 "//tensorflow/compiler/mlir/hlo",
190 "//tensorflow/compiler/mlir/hlo",
[all …]
