/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_SPMD_PARTITIONER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_SPMD_PARTITIONER_H_

#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/service/spmd/spmd_partitioner.h"

namespace xla {
namespace gpu {

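// SPMD partitioning visitor specialized for the GPU backend. It reuses the
// generic spmd::SpmdPartitioningVisitor logic and only overrides the handling
// of rng-get-and-update-state.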
class GpuSpmdPartitioningVisitor : public spmd::SpmdPartitioningVisitor {
 public:
  GpuSpmdPartitioningVisitor(
      HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
      const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
      int64* next_channel_id, spmd::SpmdLogger* logger,
      spmd::SpmdPartitionerOptions options, spmd::SpmdPartitioner* partitioner)
      : spmd::SpmdPartitioningVisitor(computation, num_partitions, num_replicas,
                                      collective_ops_creator, next_channel_id,
                                      logger, std::move(options), partitioner) {
  }
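
  // GPU-specific handling of rng-get-and-update-state during SPMD
  // partitioning; all other ops use the base visitor's implementation.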
  Status HandleRngGetAndUpdateState(HloInstruction* hlo) override;
};

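// GPU-specific SPMD partitioner pass. An illustrative sketch of how it might
// be added to an HLO pass pipeline (the surrounding pipeline setup, and the
// num_partitions/num_replicas/module variables, are assumed to exist):
//
//   HloPassPipeline spmd_pipeline("spmd-partitioning");
//   spmd_pipeline.AddPass<GpuSpmdPartitioner>(num_partitions, num_replicas);
//   TF_RETURN_IF_ERROR(spmd_pipeline.Run(module).status());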
class GpuSpmdPartitioner : public spmd::SpmdPartitioner {
 public:
  GpuSpmdPartitioner(int64_t num_partitions, int64_t num_replicas)
      : spmd::SpmdPartitioner(num_partitions, num_replicas,
                              GetSpmdPartitionerOptions()) {}

 protected:
  std::unique_ptr<spmd::SpmdPartitioningVisitor> CreateVisitor(
      HloComputation* computation, int64_t num_partitions, int64_t num_replicas,
      const spmd::SPMDCollectiveOpsCreator& collective_ops_creator,
      int64* next_channel_id, spmd::SpmdLogger* logger,
      spmd::SpmdPartitionerOptions options) override;

  Status PreprocessSharding(HloModule* module) override;
  bool CanSideEffectingHaveReplicatedSharding(
      const HloInstruction* hlo) override;

 private:
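  // Default options for the GPU partitioner. Allowing the module signature to
  // change lets the pass rewrite entry-computation parameter and result shapes
  // to their per-partition (sharded) shapes.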
  static spmd::SpmdPartitionerOptions GetSpmdPartitionerOptions() {
    spmd::SpmdPartitionerOptions options;
    options.allow_module_signature_change = true;
    return options;
  }
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_SPMD_PARTITIONER_H_