• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
17 
18 #include "absl/container/flat_hash_set.h"
19 #include "absl/memory/memory.h"
20 #include "tensorflow/compiler/xla/map_util.h"
21 #include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
22 #include "tensorflow/compiler/xla/service/hlo_computation.h"
23 #include "tensorflow/compiler/xla/service/hlo_reachability.h"
24 #include "tensorflow/core/platform/random.h"
25 
26 namespace xla {
27 namespace gpu {
28 
HasStreamAssigned(const HloInstruction & hlo) const29 bool StreamAssignment::HasStreamAssigned(const HloInstruction& hlo) const {
30   return hlo_to_stream_number_.contains(&hlo);
31 }
32 
StreamNumberForHlo(const HloInstruction & hlo) const33 int StreamAssignment::StreamNumberForHlo(const HloInstruction& hlo) const {
34   return FindOrDie(hlo_to_stream_number_, &hlo);
35 }
36 
AssignStreamToHlo(const HloInstruction * hlo,int stream_num)37 void StreamAssignment::AssignStreamToHlo(const HloInstruction* hlo,
38                                          int stream_num) {
39   CHECK_GE(stream_num, 0);
40   if (stream_num >= stream_count_) {
41     stream_count_ = stream_num + 1;
42   }
43   InsertOrDie(&hlo_to_stream_number_, hlo, stream_num);
44   VLOG(2) << "Assign stream #" << stream_num << " to " << hlo->ToString();
45 }
46 
47 namespace {
48 
49 // Returns whether the two HLOs can run concurrently, i.e., neither is a
50 // transitive consumer of the other.
CanRunConcurrently(const HloInstruction & a,const HloInstruction & b,const HloReachabilityMap & reachability)51 bool CanRunConcurrently(const HloInstruction& a, const HloInstruction& b,
52                         const HloReachabilityMap& reachability) {
53   return !reachability.IsConnected(&a, &b);
54 }
55 
// Sentinel stream number meaning "this instruction needs no stream".
constexpr int kInvalidStreamNum = -1;

// Returns true iff `candidate` denotes a real (assigned) stream, i.e. is not
// the sentinel value.
inline bool IsStreamNumValid(int candidate) {
  return candidate != kInvalidStreamNum;
}
61 
62 // Returns which existing stream to assign to `hlo`, or -1 if a stream is not
63 // needed. `stream_assignment` is the existing stream assignment for all
64 // instructions topologically before `hlo`. `seen_gemms` contains all GEMMs that
65 // are topologically before `hlo`.
ComputeStreamToAssign(const HloInstruction & hlo,const StreamAssignment & stream_assignment,const HloReachabilityMap & reachability,const std::vector<const HloInstruction * > & seen_gemms)66 int ComputeStreamToAssign(
67     const HloInstruction& hlo, const StreamAssignment& stream_assignment,
68     const HloReachabilityMap& reachability,
69     const std::vector<const HloInstruction*>& seen_gemms) {
70   if (hlo.opcode() == HloOpcode::kParameter ||
71       hlo.opcode() == HloOpcode::kConstant) {
72     // kParameter and kConstant do not need a thunk.
73     return kInvalidStreamNum;
74   }
75 
76   const auto& debug_options = hlo.GetModule()->config().debug_options();
77   if (!debug_options.xla_gpu_disable_multi_streaming()) {
78     LOG(ERROR) << "Multi streaming is not supported";
79   }
80   return 0;
81 }
82 
83 }  // namespace
84 
AssignStreams(const HloModule & module)85 std::unique_ptr<StreamAssignment> AssignStreams(const HloModule& module) {
86   auto stream_assignment = absl::make_unique<StreamAssignment>();
87   const HloComputation& computation = *module.entry_computation();
88   std::unique_ptr<HloReachabilityMap> reachability =
89       HloReachabilityMap::Build(&computation);
90   std::vector<const HloInstruction*> seen_gemms;
91   // The execution of different RNG Hlo instructions in the same module updates
92   // a common global variable. To avoid a race condition, we simply assign all
93   // RNG kernels to the same stream to make them run sequentially.
94   //
95   // TODO(b/111791052): If we remove such a common variable, we will need to
96   // clean up the code here.
97   int stream_num_for_rng = kInvalidStreamNum;
98   for (const auto* hlo : computation.MakeInstructionPostOrder()) {
99     // If we ever enable fusion of RNG instructions, we will need to extend this
100     // code to look inside a fused instruction.
101     int stream_num = (hlo->opcode() == HloOpcode::kRng &&
102                       IsStreamNumValid(stream_num_for_rng))
103                          ? stream_num_for_rng
104                          : ComputeStreamToAssign(*hlo, *stream_assignment,
105                                                  *reachability, seen_gemms);
106     if (IsStreamNumValid(stream_num)) {
107       stream_assignment->AssignStreamToHlo(hlo, stream_num);
108       if (hlo->opcode() == HloOpcode::kRng &&
109           !IsStreamNumValid(stream_num_for_rng)) {
110         stream_num_for_rng = stream_num;
111       }
112     }
113     if (IsCublasGemm(*hlo) || IsMatrixMultiplication(*hlo)) {
114       seen_gemms.push_back(hlo);
115     }
116   }
117   return stream_assignment;
118 }
119 
120 }  // namespace gpu
121 }  // namespace xla
122