/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_FUSIBLE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_FUSIBLE_H_

#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/instruction_fusion.h"
// TODO(b/112957171): Extract logic to determine fusibility of HLO ops from
// GpuInstructionFusion, FusionMerger, and GpuMultiOutputFusion.

namespace xla {
namespace gpu {

// Fusion passes frequently do checks across all pairs of "interesting" nodes.
// Computing e.g. FusionFitsInBudget(a, b) requires computing expensive
// properties of `a` and `b` individually. This cache lets us avoid recomputing
// those properties n^2 times.
//
// Invariant: After modifying or removing a fusion node, call Invalidate(node).
struct FusionInfoCache {
 public:
  // Must be called after modifying or removing a fusion node (or other node
  // that's part of this cache).
  void Invalidate(const HloInstruction* instr) {
    shared_memory_usage.erase(instr);
    num_unnested_reductions.erase(instr);
  }

  // The rest of the members of this class are for internal use within
  // gpu_fusible. You shouldn't need to use them yourself.
  absl::flat_hash_map<const HloInstruction*, int64_t> shared_memory_usage;
  absl::flat_hash_map<const HloInstruction*, int64_t> num_unnested_reductions;
};

// The maximum number of operands and outputs, combined, that a single fusion
// may have; fusions beyond this budget are rejected by FusionFitsInBudget.
inline constexpr int64_t MaxOperandsAndOutputsPerFusion() { return 64; }

// Whether `instr` can be the root of an input fusion.
bool IsInputFusible(const HloInstruction& instr);

// Whether `instr` can take part in a loop fusion, i.e. a fusion emitted as a
// simple elementwise loop over the output.
bool IsLoopFusible(const HloInstruction& instr);

// Whether the op transposes the physical data layout. Fusing such ops may lead
// to uncoalesced data access and may thus not be beneficial.
bool IsPhysicallyTransposing(const HloInstruction& instr);

// Note that reduction ops are lowered in different ways. Reduce input fusions
// are lowered by IrEmitterUnnested::EmitReductionToVector and must be rooted at
// reduction-to-vector ops. Other reduction ops are lowered by
// GpuElementalIrEmitter and fused like elementwise ops.
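//
// For example (an illustrative HLO sketch, not a guarantee about how any
// particular reduce is classified), a row reduction over the contiguous minor
// dimension such as
//
//   r = f32[1024] reduce(f32[1024,128] p, f32[] z), dimensions={1}, to_apply=add
//
// is a reduction-to-vector op and can therefore root a reduce input fusion.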

// Whether `instr` is an input fusion rooted at a reduction-to-vector op or a
// multi-output input fusion with at least one reduction-to-vector op root.
bool IsReduceInputFusion(const HloInstruction& instr);

// Whether `instr` is fusible as the root of a reduce input fusion, i.e.
// `instr` is either an unfused reduction-to-vector op or a reduce input
// fusion.
bool IsInputFusibleReduction(const HloInstruction& instr);

// Whether `instr` is fusible as the root of a scatter input fusion, i.e.
// `instr` is either an unfused scatter op or a scatter input fusion.
bool IsInputFusibleScatter(const HloInstruction& instr);

// Determines whether the combination of `instr1` and `instr2` into a (possibly
// multi-output) fusion fits within a "budget" -- i.e., does not have more
// operands and outputs than is allowed and does not occupy too much shared
// memory. If the fusion is a producer/consumer fusion and `instr1` is the
// consumer and `instr2` is the producer, set `is_consumer_producer_fusion` to
// true to enable more fusion.
FusionDecision FusionFitsInBudget(const HloInstruction& instr1,
                                  const HloInstruction& instr2,
                                  bool is_consumer_producer_fusion = false,
                                  FusionInfoCache* cache = nullptr);
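//
// Example (a minimal sketch; `candidates` and `Fuse` are hypothetical): a pass
// that checks many pairs can share one FusionInfoCache across all queries, but
// must invalidate entries whenever it mutates or removes a fusion node:
//
//   FusionInfoCache cache;
//   for (auto& [producer, consumer] : candidates) {
//     if (FusionFitsInBudget(*consumer, *producer,
//                            /*is_consumer_producer_fusion=*/true, &cache)) {
//       HloInstruction* fused = Fuse(producer, consumer);
//       cache.Invalidate(fused);  // Required by the cache invariant above.
//     }
//   }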

// Check if fusing producer and consumer will generate a nested loop, e.g. both
// producer and consumer are `reduce-window` HLO instructions.
bool CreatesNestedLoop(const HloInstruction& producer,
                       const HloInstruction& consumer);

// Returns the instruction that determines the emitter used for lowering,
// sometimes referred to as "the real hero".
const HloInstruction* GetRealHeroForMultiOutputFusion(
    const HloInstruction& instr);

// Whether instruction shapes are compatible for multi-output fusion, i.e.
// whether the emitters support lowering the resulting fusion.
// This function works for both sibling and producer-consumer multi-output
// fusion.
// So far, multi-output fusion is supported for loop fusions and reduce input
// fusions only. It is up to the caller to ensure the instructions themselves
// are fusible!
bool ShapesCompatibleForMultiOutputFusion(const HloInstruction& instr1,
                                          const HloInstruction& instr2);

// Whether the instructions are compatible for producer-consumer fusion, i.e.
// whether the producer and consumer are loop/input fusible and neither is a
// library call.
FusionDecision IsProducerConsumerFusible(const HloInstruction& producer,
                                         const HloInstruction& consumer);

// Whether the instructions are producer-consumer fusible with multiple outputs.
// That is, the root tuple of the multi-output fusion will contain the results
// of both the producer and the consumer.
bool IsProducerConsumerMultiOutputFusible(const HloInstruction& producer,
                                          const HloInstruction& consumer);

// Whether `instr` is a candidate for sibling fusion or as a consumer in a
// producer-consumer multi-output fusion.
bool IsFusibleAsMultiOutputFusionRoot(const HloInstruction& instr);

// Determines the fusion kind to be used when fusing `producer` and `consumer`.
HloInstruction::FusionKind ChooseFusionKind(const HloInstruction& producer,
                                            const HloInstruction& consumer);
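//
// For instance (a sketch of the expected behavior, not a contract): fusing an
// elementwise producer into a reduction-to-vector consumer yields
// HloInstruction::FusionKind::kInput, while fusing two elementwise ops yields
// kLoop.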

// Returns whether `consumer` is the only non-root user of `instr`.
bool IsConsumerTheOnlyNonRootUser(const HloInstruction& instr,
                                  const HloInstruction& consumer);

// Returns the number of instructions in the fusible `instr`. If `instr` is not
// a fusion instruction, 1 is returned.
size_t GetInstrCountOfFusible(const HloInstruction& instr);

// Returns the outputs of the fusible `instr`.
absl::InlinedVector<const HloInstruction*, 2> GetOutputsOfFusible(
    const HloInstruction& instr);
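//
// For example (a sketch of the expected behavior): for a multi-output fusion
// whose fused computation ends in `ROOT tuple(x, y)` this returns {x, y}; for
// an instruction that is not a fusion it returns {&instr}.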

// Returns the output size of the fusible `instr`.
size_t GetOutputSizeOfFusible(const HloInstruction& instr);

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_FUSIBLE_H_