/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"

#include <atomic>
#include <ostream>
#include <string>

21 #include "absl/memory/memory.h"
22 #include "absl/strings/str_format.h"
23 #include "tensorflow/compiler/xla/map_util.h"
24 #include "tensorflow/compiler/xla/service/hlo_computation.h"
25 #include "tensorflow/compiler/xla/service/hlo_opcode.h"
26 #include "tensorflow/compiler/xla/shape_util.h"
27 #include "tensorflow/compiler/xla/types.h"
28 #include "tensorflow/compiler/xla/util.h"
29 #include "tensorflow/core/lib/core/bits.h"
30 #include "tensorflow/core/platform/logging.h"
31 
32 namespace xla {
33 namespace gpu {
34 
std::ostream& operator<<(std::ostream& out,
                         const LaunchDimensions& launch_dims) {
  out << absl::StrFormat("[block: %d, thread: %d]", launch_dims.block_count(),
                         launch_dims.threads_per_block());
  return out;
}

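// Returns the maximum number of threads per block to use on `device_desc`,
// falling back to the warp size (or 32 if even that is unknown) when the
// device description does not report a threads-per-block limit.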
int64 ThreadsPerBlockLimit(const se::DeviceDescription& device_desc) {
  int64 threads_per_block = device_desc.threads_per_block_limit();
  if (threads_per_block <= 0) {
    static std::atomic<int64> log_count{0};
    if (log_count.fetch_add(1) < 8) {
      LOG(WARNING) << "Attempting to calculate launch dimensions for GPU "
                      "without full information about its capabilities.  "
                      "StreamExecutor's PopulateDeviceDescription should be "
                      "updated for this device.";
    }
    threads_per_block = device_desc.threads_per_warp();
    if (threads_per_block == 0) {
      // Fall back to *something* if we can't even get num threads per warp.
      threads_per_block = 32;
    }
  }
  return threads_per_block;
}

// Calculates the launch dimensions used to invoke a kernel producing `shape`.
LaunchDimensions CalculateLaunchDimensions(
    const Shape& shape, const se::DeviceDescription& device_desc,
    int unroll_factor) {
  int64 num_elements = ShapeUtil::ElementsIn(shape);
  if (num_elements <= 1) {
    return LaunchDimensions();
  }

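  // The kernel is unrolled so that each thread handles `unroll_factor`
  // elements, so the element count must be an exact multiple of it.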
  CHECK_EQ(num_elements % unroll_factor, 0);
  num_elements = num_elements / unroll_factor;

  // Since we don't do any inter-warp communication, we're free to choose any
  // block size we want, subject to hardware constraints.  We choose the largest
  // block size allowed, as empirically, this is a performance win on almost
  // (but not all) benchmarks.
  //
  // My guess is that using a larger block size encourages ptxas to decrease
  // per-thread register usage, thus allowing for higher occupancy, but I
  // haven't verified this.
  //
  // TODO(jlebar): Investigate this further, and tune this heuristic so we can
  // run faster on the few benchmarks where smaller block size helps.
  int64 threads_per_block = ThreadsPerBlockLimit(device_desc);
  if (num_elements < threads_per_block) {
    threads_per_block = num_elements;
    VLOG(2) << "Update # of threads per block to the element count ("
            << threads_per_block << ") because the latter is smaller.";
  }

  int64 block_count = CeilOfRatio(num_elements, threads_per_block);
  VLOG(2) << absl::StrFormat(
      "Initialized the block count to ceil(# of elements / threads per "
      "block) = ceil(%d/%d) = %d",
      num_elements, threads_per_block, block_count);

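  // For example (illustrative numbers): with 2^20 elements, an unroll factor
  // of 4, and a 1024-thread limit, this launches ceil(262144 / 1024) = 256
  // blocks of 1024 threads each.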
  return LaunchDimensions(block_count, threads_per_block);
}
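
// Example usage (an illustrative sketch, not taken from this file): a caller
// such as a kernel emitter might compute and log launch dimensions roughly as
// follows, where `output_shape` and `device_desc` are assumed to be available
// from the surrounding context:
//
//   LaunchDimensions launch_dims =
//       CalculateLaunchDimensions(output_shape, device_desc,
//                                 /*unroll_factor=*/1);
//   VLOG(3) << "Launching with " << launch_dims;
//
// The resulting launch_dims.block_count() and launch_dims.threads_per_block()
// then configure the GPU kernel launch.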

}  // namespace gpu
}  // namespace xla