/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 
16 #ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LAUNCH_DIMENSIONS_H_
17 #define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LAUNCH_DIMENSIONS_H_
18 
19 #include <map>
20 #include <memory>
21 #include <string>
22 
23 #include "tensorflow/compiler/xla/service/gpu/gpu_device_info.h"
24 #include "tensorflow/compiler/xla/shape.h"
25 
namespace xla {
namespace gpu {

// Encapsulates the launch dimensions of a kernel, e.g., the block count and
// the number of threads per block.
class LaunchDimensions {
 public:
  struct Dim3D {
    int64_t x, y, z;
  };

  // The default constructor creates launch dimensions that indicate
  // single-threaded execution.
  LaunchDimensions()
      : block_counts_({1, 1, 1}), thread_counts_per_block_({1, 1, 1}) {}

  LaunchDimensions(int64_t block_x_count, int64_t thread_x_count_per_block)
      : block_counts_({block_x_count, 1, 1}),
        thread_counts_per_block_({thread_x_count_per_block, 1, 1}) {}

  LaunchDimensions(const Dim3D& block_counts,
                   const Dim3D& thread_counts_per_block)
      : block_counts_(block_counts),
        thread_counts_per_block_(thread_counts_per_block) {}

  Dim3D block_counts() const { return block_counts_; }

  Dim3D thread_counts_per_block() const { return thread_counts_per_block_; }

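  // Returns the total number of threads in the launch: the product of the
  // per-dimension block counts and the per-block thread counts.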
  int64_t launch_bound() const {
    return block_counts_.x * thread_counts_per_block_.x * block_counts_.y *
           thread_counts_per_block_.y * block_counts_.z *
           thread_counts_per_block_.z;
  }

  std::string ToString() const {
    return absl::StrCat("blocks: {", block_counts_.x, ", ", block_counts_.y,
                        ", ", block_counts_.z, "}, threads/block: {",
                        thread_counts_per_block_.x, ", ",
                        thread_counts_per_block_.y, ", ",
                        thread_counts_per_block_.z, "}");
  }

 private:
  Dim3D block_counts_;
  Dim3D thread_counts_per_block_;
};
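// Usage sketch (hypothetical values, not part of this header): a 1D launch
// covering 1 << 20 elements with 256 threads per block:
//   LaunchDimensions dims(/*block_x_count=*/4096,
//                         /*thread_x_count_per_block=*/256);
//   dims.launch_bound();  // 4096 * 256 = 1 << 20 threads in total.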

std::ostream& operator<<(std::ostream& out,
                         const LaunchDimensions& launch_dims);

struct LaunchDimensionsConfig {
  // The kernel implementation will be unrolled if `unroll_factor` is
  // greater than one.
  int unroll_factor = 1;
  // A wave is a group of blocks that execute at the same time on the
  // GPU. If there are more blocks than can run concurrently, multiple
  // waves of blocks run sequentially. If `few_waves` is true, each
  // thread will loop over a block of `unroll_factor` elements;
  // otherwise each thread will handle only `unroll_factor` elements.
  bool few_waves = false;
  // If `row_vectorized` is true, the block size will be equal to
  // `hlo.shape().dimensions().back() / unroll_factor`.
  // Currently `few_waves` and `row_vectorized` do not work together.
  bool row_vectorized = false;

  std::string ToString() const {
    return absl::StrCat("unroll_factor=", unroll_factor,
                        ", few_waves=", few_waves,
                        ", row_vectorized=", row_vectorized);
  }
};
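// Usage sketch (hypothetical values): request an unrolled-by-4 kernel where
// each thread loops over several elements:
//   LaunchDimensionsConfig config;
//   config.unroll_factor = 4;
//   config.few_waves = true;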

// Returns -1 if the shape doesn't allow the row vectorization code path.
// If supported, returns the number of threads to use in that case.
int64_t ThreadsPerBlockRowVectorized(const Shape& shape,
                                     GpuDeviceInfo gpu_device_info,
                                     LaunchDimensionsConfig dim_config);

// Calculates the launch dimensions to use when invoking a kernel for the
// given `shape`.
StatusOr<LaunchDimensions> CalculateLaunchDimensions(
    const Shape& shape, GpuDeviceInfo gpu_device_info,
    LaunchDimensionsConfig dim_config = {});
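// Usage sketch (hypothetical; assumes `shape` and `device_info` describe the
// kernel output and the target GPU):
//   TF_ASSIGN_OR_RETURN(LaunchDimensions launch_dims,
//                       CalculateLaunchDimensions(shape, device_info));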

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LAUNCH_DIMENSIONS_H_