/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/layout.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/gpu/launch_dimensions.h"
#include "tensorflow/compiler/xla/service/hlo_module_config.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/protobuf/autotuning.pb.h"
#include "tensorflow/stream_executor/gpu/gpu_asm_opts.h"
#include "tensorflow/stream_executor/kernel_spec.h"

// Helper functions for interacting with StreamExecutor.

namespace xla {
namespace gpu {

// Returns (input, filter, output) XLA Layout protos given the StreamExecutor
// (cudnn-style) layout enums for a convolution described by `dnums`.
StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
                                      se::dnn::DataLayout input,
                                      se::dnn::FilterLayout filter,
                                      se::dnn::DataLayout output);

// Returns (input, filter, output) StreamExecutor layouts given the XLA layouts
// embedded in the shapes of the convolution operands.
StatusOr<
    std::tuple<se::dnn::DataLayout, se::dnn::FilterLayout, se::dnn::DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
                                     const Shape& input, const Shape& filter,
                                     const Shape& output);

// Finds the VECT_C dimension in input/filter/output, if present.
//
// A cudnn convolution may have layout NCHW_VECT_C, which means instead of
// [N,C,H,W], the layout is [N,C/k,H,W,k] for some k (usually 4 or 32).
//
// ConvolutionDimensionNumbers doesn't explicitly store which is the `k`
// dimension, because only cudnn convolutions have this feature; it's not
// applicable elsewhere.  We find it by finding a dimension in the
// input/filter/output shape that is *not* in dnums.  Each returned optional
// is empty when the corresponding operand has no such vectorized dimension.
std::tuple<absl::optional<int64>, absl::optional<int64>, absl::optional<int64>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
                          const Shape& input, const Shape& filter,
                          const Shape& output);

// Generates and returns a unique lock per each provided executor.
// Guarantees that blocks of code both holding a lock for the same provided
// executor (as given by this function) will not be running concurrently.
//
// This is used to prevent other XLA instances from trying to autotune on a
// device while another thread is using it.
tensorflow::mutex_lock LockGpu(const se::StreamExecutor* stream_exec);

// Creates a kernel with a provided name, based on the provided PTX in `ptx`.
// The kernel should be executed using the provided executor.
// The argument `cubin_data` represents compiled PTX and may be left empty.
//
// The canonical storage for both `ptx` and `cubin_data` should outlive
// the lifetime of the kernel (the kernel keeps views into them rather than
// copies).
StatusOr<std::unique_ptr<se::KernelBase>> CreateKernel(
    absl::string_view kernel_name, uint64 num_args, absl::string_view ptx,
    absl::Span<const uint8> cubin_data, se::StreamExecutor* stream_exec);

// Runs the loaded `kernel` on `stream` with the provided device-memory
// arguments and launch dimensions.
Status ExecuteKernelOnStream(const se::KernelBase& kernel,
                             absl::Span<const se::DeviceMemoryBase> args,
                             const LaunchDimensions& dims, se::Stream* stream);

// Creates GpuAsmOpts out of an HloModuleConfig (PTX assembler options).
se::GpuAsmOpts PtxOptsFromConfig(const HloModuleConfig& hlo_module_config);

// Initializes `buffer` with random data on `stream`.
// `rng_state` is an in/out parameter for the pseudorandom generator state.
// `buffer_type` determines the element type the buffer is filled with.
//
// Precondition: `buffer_type` is a floating point type, and `rng_state` must
// be initialized to zero on the first use.
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
                      int64* rng_state, se::DeviceMemoryBase buffer);

// Maps an XLA CudnnConvKind to the corresponding StreamExecutor
// dnn::ConvolutionKind; fails for kinds with no DNN equivalent.
StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
    CudnnConvKind kind);
// Maps an XLA PrimitiveType to the corresponding StreamExecutor
// dnn::DataType; fails for types with no DNN equivalent.
StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(PrimitiveType type);

// Returns the result with the smallest time which has not failed.
// If deterministic output is requested, returns the first (not failing)
// result instead.
StatusOr<tensorflow::AutotuneResult> PickBestResult(
    absl::Span<tensorflow::AutotuneResult const> profile_results,
    const HloInstruction& instr);

// Returns whether determinism is required by `config`.
bool RequireDeterminism(const HloModuleConfig& config);

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_