/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_

#include <memory>
#include <vector>

#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"  // for int64

namespace xla {
namespace gpu {

// Determines the schedule of HLO instructions, represented by the total order
// of thunk launches, and the partial order of HLO instructions. The HLO
// instructions are only partially ordered, despite the total ordering of thunk
// launches, because thunks may be scheduled onto concurrent streams. This
// schedule is used by BufferAssigner to determine buffer liveness (i.e. to
// minimize allocations), and also by ThunkSchedule to determine the thunk
// launch order. This class differs from xla::HloSchedule in that HloSchedule
// represents a total order of all instructions in the module for backends
// which execute HLO instructions strictly sequentially.
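//
// Example usage (an illustrative sketch, not code from this header; it
// assumes AssignStreams() from stream_assignment.h, the TF_ASSIGN_OR_RETURN
// status macro, and a pointer_size value taken from the backend's
// shape-size function):
//
//   std::unique_ptr<StreamAssignment> streams = AssignStreams(*module);
//   TF_ASSIGN_OR_RETURN(
//       std::unique_ptr<GpuHloSchedule> schedule,
//       GpuHloSchedule::Build(*module, *streams, pointer_size));
//   const std::vector<HloInstruction*>& launch_order =
//       schedule->ThunkLaunchOrder();
//   std::unique_ptr<HloOrdering> ordering = schedule->ConsumeHloOrdering();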
class GpuHloSchedule {
 public:
  // Constructs a GpuHloSchedule for the given module, based on the given
  // stream assignment.
  static StatusOr<std::unique_ptr<GpuHloSchedule>> Build(
      const HloModule& module, const StreamAssignment& stream_assignment,
      int64 pointer_size);

  // Returns the total order of thunk launches, represented in terms of HLO
  // instructions.
  const std::vector<HloInstruction*>& ThunkLaunchOrder() const {
    return thunk_launch_order_;
  }

  // Returns the partial order of HLO instructions. This method may only be
  // called once. The order is based on the total order of thunk launches, the
  // stream assignment, and the data dependencies in the HLO DAG.
  std::unique_ptr<HloOrdering> ConsumeHloOrdering() {
    return std::move(hlo_ordering_);
  }

 private:
  GpuHloSchedule();

  std::vector<HloInstruction*> thunk_launch_order_;
  std::unique_ptr<HloOrdering> hlo_ordering_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_