/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/util/use_cudnn.h"

#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/env_var.h"

#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif  // GOOGLE_CUDA

namespace tensorflow {

#define ADD_BOOL_CUDNN_FLAG(func_name, flag_name, default_value)           \
  bool func_name() {                                                        \
    bool value = default_value;                                             \
    Status status = ReadBoolFromEnvVar(#flag_name, default_value, &value);  \
    if (!status.ok()) {                                                     \
      LOG(ERROR) << status;                                                 \
    }                                                                       \
    return value;                                                           \
  }
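// As a concrete illustration (expansion sketch, not generated code): the
// invocation ADD_BOOL_CUDNN_FLAG(CudnnUseAutotune, TF_CUDNN_USE_AUTOTUNE, true)
// below defines a CudnnUseAutotune() function that reads the environment
// variable TF_CUDNN_USE_AUTOTUNE via ReadBoolFromEnvVar, returns the default
// (true) when the variable is unset, and logs an error while keeping the
// default when the value cannot be parsed as a boolean.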

bool CudnnUseFrontend() {
  static bool result = [] {
    bool value = false;
#if GOOGLE_CUDA
    if (CUDNN_VERSION >= 8100) {
      // The frontend API in cuDNN 8.1.0 still has issues with fused
      // convolution, so it is only enabled by default for cuDNN 8.2.0 and
      // later; TF_CUDNN_USE_FRONTEND overrides that default.
      Status status = ReadBoolFromEnvVar("TF_CUDNN_USE_FRONTEND",
                                         CUDNN_VERSION >= 8200, &value);
      if (!status.ok()) {
        LOG(ERROR) << status;
      }
    }
#endif  // GOOGLE_CUDA
    return value;
  }();
  return result;
}
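// Note on the definition above: the frontend decision is cached in a
// function-local static, so TF_CUDNN_USE_FRONTEND is consulted only the first
// time CudnnUseFrontend() is called in a process; later changes to the
// environment variable have no effect.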

// Whether to let cuDNN autotune the convolution algorithms it uses
// (name-based description; the precise effect is defined by the kernels that
// call CudnnUseAutotune()).
ADD_BOOL_CUDNN_FLAG(CudnnUseAutotune, TF_CUDNN_USE_AUTOTUNE, true);
// Whether to auto-tune the cuDNN RNN forward and backward passes, to pick
// statistically the best cudnnRNNAlgo_t and cudnnMathType_t.
// The flag is disabled when TF_DEBUG_CUDNN_RNN is turned on.
ADD_BOOL_CUDNN_FLAG(CudnnRnnUseAutotune, TF_CUDNN_RNN_USE_AUTOTUNE, true);
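// Whether to disable the special-cased handling of 1x1 convolutions in the
// cuDNN path (description inferred from the flag name; the exact behavior is
// determined by the kernels that call CudnnDisableConv1x1Optimization()).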
ADD_BOOL_CUDNN_FLAG(CudnnDisableConv1x1Optimization,
                    TF_CUDNN_DISABLE_CONV_1X1_OPTIMIZATION, false);

// Whether to run the cuDNN RNN forward and backward passes in debug mode,
// where users can force a specific cudnnRNNAlgo_t and cudnnMathType_t by also
// setting the following two env vars (see the usage sketch after these flag
// definitions):
//   TF_DEBUG_CUDNN_RNN_USE_TENSOR_OPS
//   TF_DEBUG_CUDNN_RNN_ALGO
// By default it is disabled and only intended for testing and profiling.
ADD_BOOL_CUDNN_FLAG(DebugCudnnRnn, TF_DEBUG_CUDNN_RNN, false);
// Whether to use TENSOR_OP_MATH in cuDNN RNN for both the forward and backward
// passes. Only effective when TF_DEBUG_CUDNN_RNN is true.
// Note that none of the persistent RNN algorithms support TENSOR_OP_MATH
// before cuDNN 7.1. See the NVIDIA cuDNN manual for more details.
ADD_BOOL_CUDNN_FLAG(DebugCudnnRnnUseTensorOps,
                    TF_DEBUG_CUDNN_RNN_USE_TENSOR_OPS, false);
#undef ADD_BOOL_CUDNN_FLAG
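
// Usage sketch (assumed shell invocation, not part of TensorFlow itself): the
// debug-mode flags above are plain environment variables, so a run such as
//
//   TF_DEBUG_CUDNN_RNN=1 \
//   TF_DEBUG_CUDNN_RNN_USE_TENSOR_OPS=1 \
//   TF_DEBUG_CUDNN_RNN_ALGO=0 \
//   python train.py
//
// would force the RNN algorithm and math type instead of autotuning them.
// ReadBoolFromEnvVar is expected to accept "0"/"1"/"true"/"false"
// (case-insensitive); treat the exact accepted spellings as an assumption.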

#define ADD_INT64_CUDNN_FLAG(func_name, flag_name, default_value)            \
  int64 func_name() {                                                         \
    int64 value = default_value;                                              \
    Status status = ReadInt64FromEnvVar(#flag_name, default_value, &value);   \
    if (!status.ok()) {                                                       \
      LOG(ERROR) << status;                                                   \
    }                                                                         \
    return value;                                                             \
  }
// cuDNN RNN algorithm to use for both the forward and backward passes. Only
// effective when TF_DEBUG_CUDNN_RNN is true. See the NVIDIA cuDNN manual for
// the allowed cudnnRNNAlgo_t values.
ADD_INT64_CUDNN_FLAG(DebugCudnnRnnAlgo, TF_DEBUG_CUDNN_RNN_ALGO, -1);
#undef ADD_INT64_CUDNN_FLAG

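// Returns true for the filter shapes this check is willing to hand to cuDNN:
// square filters of side 1, 3, 5, or 7 with matching input and output depth.
// (This summary restates the condition below; consult the call sites for why
// these particular shapes are singled out.)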
bool IsCudnnSupportedFilterSize(const int32_t filter_rows,
                                const int32_t filter_cols,
                                const int32_t in_depth,
                                const int32_t out_depth) {
  return in_depth == out_depth && filter_rows == filter_cols &&
         (filter_rows == 1 || filter_rows == 3 || filter_rows == 5 ||
          filter_rows == 7);
}

}  // namespace tensorflow