/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
16 #ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_DYNAMIC_UPDATE_SLICE_UTIL_H_
17 #define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_DYNAMIC_UPDATE_SLICE_UTIL_H_
18 
19 #include "tensorflow/compiler/xla/service/buffer_assignment.h"
20 #include "tensorflow/compiler/xla/service/elemental_ir_emitter.h"
21 #include "tensorflow/compiler/xla/service/gpu/launch_dimensions.h"
22 #include "tensorflow/compiler/xla/service/hlo_instruction.h"
23 #include "tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h"
24 #include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
25 
26 // Utilities related to emitting LLVM IR for various HLO ops.
27 
28 namespace xla {
29 namespace llvm_ir {
30 
31 using GeneratorForOperandIrArrays =
32     std::function<std::vector<llvm_ir::IrArray>()>;
33 
34 // Determines whether the given instruction might be implemented as an
35 // in-place dynamic-update-slice after we have a buffer assignment.
36 //
37 // If this returns false, then CanUpdateDynamicSliceInPlace and
38 // CanEmitFusedDynamicUpdateSliceInPlace will also return false.
39 //
40 // This is useful if you want to check whether an instruction might be an
41 // in-place DUS during an HLO pass, at which point you don't have a buffer
42 // assignment.
43 //
44 // Note that simplifications to the HLO graph might change this function from
45 // returning false to returning true.  Specifically, simplifying the contents of
46 // fusion nodes might cause a false->true transition.  In general this isn't a
47 // problem by the time you're calling this function, but beware.
48 bool MayBeImplementedAsInPlaceDynamicUpdateSlice(const HloInstruction* instr);
49 
50 // Checks if we can emit code for the given DynamicUpdateSlice node that updates
51 // its input in place.  Returns true if the dynamic-update-slice's
52 // array-to-be-updated and output share the same BufferAllocation::Slice.
53 //
54 // dynamic_update_slice must be a DynamicUpdateSlice op.
55 bool CanUpdateDynamicSliceInPlace(HloInstruction* dynamic_update_slice,
56                                   const BufferAssignment& assignment);
57 
58 // Checks if the given fusion node is amenable to being implemented by
59 // EmitFusedDynamicUpdateSliceInPlace.
60 bool CanEmitFusedDynamicUpdateSliceInPlace(HloInstruction* fusion,
61                                            const BufferAssignment& assignment);
62 
63 // Emits IR for running the given dynamic-update-slice op in-place -- that is,
64 // where the input and output buffers share the same slice, so we can simply
65 // modify the input/output buffer without touching any of the other elements.
66 Status EmitDynamicUpdateSliceInPlace(absl::Span<const IrArray> operand_arrays,
67                                      const IrArray& output_array,
68                                      absl::string_view name,
69                                      llvm::IRBuilder<>* b);
70 
71 // Given a loop-fusion node whose root is a dynamic-update-slice op whose
72 // array-to-be-updated and output share the same buffer slice, emits
73 // (sequential) code for a fusion node that does the dynamic-update-slice in
74 // place.
75 Status EmitFusedDynamicUpdateSliceInPlace(HloInstruction* fusion,
76                                           const IrArray& fusion_output_array,
77                                           FusedIrEmitter* fused_emitter,
78                                           llvm::IRBuilder<>* b);
79 
80 // Same as EmitFusedDynamicUpdateSliceInPlace, except emits a parallel loop with
81 // the given launch dimensions.
82 Status EmitParallelFusedDynamicUpdateSliceInPlace(
83     const HloComputation* fusion, const IrArray& fusion_output_array,
84     FusedIrEmitter* fused_emitter,
85     const gpu::LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b);
86 
87 }  // namespace llvm_ir
88 }  // namespace xla
89 
90 #endif  // TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_DYNAMIC_UPDATE_SLICE_UTIL_H_
91