• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow/core/util/use_cudnn.h"
17 
18 #include "tensorflow/core/lib/core/stringpiece.h"
19 #include "tensorflow/core/lib/strings/str_util.h"
20 #include "tensorflow/core/platform/types.h"
21 #include "tensorflow/core/util/env_var.h"
22 
23 #if GOOGLE_CUDA
24 #include "third_party/gpus/cudnn/cudnn.h"
25 #endif  // GOOGLE_CUDA
26 
27 namespace tensorflow {
28 
29 #define ADD_BOOL_CUDNN_FLAG(func_name, flag_name, default_value)           \
30   bool func_name() {                                                       \
31     bool value = default_value;                                            \
32     Status status = ReadBoolFromEnvVar(#flag_name, default_value, &value); \
33     if (!status.ok()) {                                                    \
34       LOG(ERROR) << status;                                                \
35     }                                                                      \
36     return value;                                                          \
37   }
38 
// Whether convolutions should go through the cuDNN frontend API. Decided once
// per process: requires cuDNN >= 8.1.0, defaults on from 8.2.0 (8.1.x has
// known issues with fused convolution), and can be overridden via the
// TF_CUDNN_USE_FRONTEND environment variable. Always false without CUDA.
bool CudnnUseFrontend() {
  static const bool use_frontend = [] {
    bool enabled = false;
#if GOOGLE_CUDA
    if (CUDNN_VERSION >= 8100) {
      // Default to enabled only from 8.2.0 on; 8.1.0 + the frontend has
      // issues regarding fused convolution.
      const Status status = ReadBoolFromEnvVar("TF_CUDNN_USE_FRONTEND",
                                               CUDNN_VERSION >= 8200, &enabled);
      if (!status.ok()) LOG(ERROR) << status;
    }
#endif  // GOOGLE_CUDA
    return enabled;
  }();
  return use_frontend;
}
56 
// Whether to enable Cudnn runtime compiled kernels which are able to support
// more general fusion patterns but might increase the warmup time.
// Requires cuDNN >= 8.4.0; controlled by TF_CUDNN_USE_RUNTIME_FUSION
// (default off) and always false in non-CUDA builds.
// TODO(kaixih@nvidia): we can make it default when Cudnn further improves the
// runtime compilation overhead.
bool CudnnUseRuntimeFusion() {
  static const bool use_runtime_fusion = [] {
    bool enabled = false;
#if GOOGLE_CUDA
    if (CUDNN_VERSION >= 8400) {
      const Status status =
          ReadBoolFromEnvVar("TF_CUDNN_USE_RUNTIME_FUSION", false, &enabled);
      if (!status.ok()) LOG(ERROR) << status;
    }
#endif  // GOOGLE_CUDA
    return enabled;
  }();
  return use_runtime_fusion;
}
77 
// Whether to auto-tune Cudnn convolution algorithms; enabled unless
// TF_CUDNN_USE_AUTOTUNE is set to false.
ADD_BOOL_CUDNN_FLAG(CudnnUseAutotune, TF_CUDNN_USE_AUTOTUNE, true);
// Whether to auto-tune Cudnn RNN forward and backward pass to pick
// statistically the best cudnnRNNAlgo_t and cudnnMathType_t.
// The flag is disabled when TF_DEBUG_CUDNN_RNN is turned on.
ADD_BOOL_CUDNN_FLAG(CudnnRnnUseAutotune, TF_CUDNN_RNN_USE_AUTOTUNE, true);
// Whether to skip the Cudnn 1x1-convolution optimization; off by default,
// toggled via TF_CUDNN_DISABLE_CONV_1X1_OPTIMIZATION.
ADD_BOOL_CUDNN_FLAG(CudnnDisableConv1x1Optimization,
                    TF_CUDNN_DISABLE_CONV_1X1_OPTIMIZATION, false);

// Whether to run Cudnn RNN forward and backward in debug mode, where users can
// force a specified cudnnRNNAlgo_t and cudnnMathType_t, when used together with
// the following two env vars:
// TF_DEBUG_CUDNN_RNN_USE_TENSOR_OPS
// TF_DEBUG_CUDNN_RNN_ALGO
// By default it is disabled and only intended for testing and profiling.
ADD_BOOL_CUDNN_FLAG(DebugCudnnRnn, TF_DEBUG_CUDNN_RNN, false);
// If using TENSOR_OP_MATH in Cudnn RNN for both forward and backward pass. Only
// effective when TF_DEBUG_CUDNN_RNN is true.
// Note none of the persistent RNN algorithm support TENSOR_OP_MATH before
// Cudnn 7.1. See Nvidia Cudnn manual for more details.
ADD_BOOL_CUDNN_FLAG(DebugCudnnRnnUseTensorOps,
                    TF_DEBUG_CUDNN_RNN_USE_TENSOR_OPS, false);
#undef ADD_BOOL_CUDNN_FLAG
100 
// Expands to a function `int64_t func_name()` that reads the integer
// environment variable `flag_name`, falling back to `default_value` when the
// variable is unset; a parse failure is logged and the default is returned.
#define ADD_INT64_CUDNN_FLAG(func_name, flag_name, default_value)           \
  int64_t func_name() {                                                     \
    int64_t value = default_value;                                          \
    Status status = ReadInt64FromEnvVar(#flag_name, default_value, &value); \
    if (!status.ok()) {                                                     \
      LOG(ERROR) << status;                                                 \
    }                                                                       \
    return value;                                                           \
  }
// Cudnn RNN algorithm to use for both forward and backward pass. Only effective
// when TF_DEBUG_CUDNN_RNN is true. See Nvidia Cudnn manual for allowed
// cudnnRNNAlgo_t. The default of -1 means "not forced".
ADD_INT64_CUDNN_FLAG(DebugCudnnRnnAlgo, TF_DEBUG_CUDNN_RNN_ALGO, -1);
#undef ADD_INT64_CUDNN_FLAG
115 
// Returns true when a convolution with the given filter spatial size and
// channel depths should be dispatched as a Cudnn grouped convolution: the
// input and output depths must match and the filter must be square with a
// side length of 1, 3, 5, or 7.
bool ShouldCudnnGroupedConvolutionBeUsed(const int32_t filter_rows,
                                         const int32_t filter_cols,
                                         const int32_t in_depth,
                                         const int32_t out_depth) {
  if (in_depth != out_depth) return false;
  if (filter_rows != filter_cols) return false;
  switch (filter_rows) {
    case 1:
    case 3:
    case 5:
    case 7:
      return true;
    default:
      return false;
  }
}
124 
125 }  // namespace tensorflow
126