/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/util/util.h"

#include <string>
#include <vector>

#include "absl/base/call_once.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/env_var.h"

namespace tensorflow {

StringPiece NodeNamePrefix(const StringPiece& op_name) {
  StringPiece sp(op_name);
  auto p = sp.find('/');
  if (p == StringPiece::npos || p == 0) {
    return "";
  } else {
    return StringPiece(sp.data(), p);
  }
}

StringPiece NodeNameFullPrefix(const StringPiece& op_name) {
  StringPiece sp(op_name);
  auto p = sp.rfind('/');
  if (p == StringPiece::npos || p == 0) {
    return "";
  } else {
    return StringPiece(sp.data(), p);
  }
}
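
// Illustrative usage (a sketch, not part of the original file; the op name
// below is hypothetical): NodeNamePrefix returns the text before the first
// '/', NodeNameFullPrefix the text before the last '/'.
//   NodeNamePrefix("scope/sub/op");      // -> "scope"
//   NodeNameFullPrefix("scope/sub/op");  // -> "scope/sub"
//   NodeNamePrefix("op");                // -> ""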

MovingAverage::MovingAverage(int window)
    : window_(window),
      sum_(0.0),
      data_(new double[window_]),
      head_(0),
      count_(0) {
  CHECK_GE(window, 1);
}

MovingAverage::~MovingAverage() { delete[] data_; }

void MovingAverage::Clear() {
  count_ = 0;
  head_ = 0;
  sum_ = 0;
}

double MovingAverage::GetAverage() const {
  if (count_ == 0) {
    return 0;
  } else {
    return static_cast<double>(sum_) / count_;
  }
}

void MovingAverage::AddValue(double v) {
  if (count_ < window_) {
    // This is the warmup phase. We don't have a full window's worth of data.
    head_ = count_;
    data_[count_++] = v;
  } else {
    if (window_ == ++head_) {
      head_ = 0;
    }
    // Toss the oldest element
    sum_ -= data_[head_];
    // Add the newest element
    data_[head_] = v;
  }
  sum_ += v;
}
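
// Illustrative usage (a sketch, not part of the original file): with a window
// of 3, only the three most recent samples contribute to the average once the
// buffer is full.
//   MovingAverage avg(3);
//   avg.AddValue(1.0);
//   avg.AddValue(2.0);
//   avg.AddValue(3.0);
//   avg.GetAverage();   // -> 2.0
//   avg.AddValue(7.0);  // evicts 1.0
//   avg.GetAverage();   // -> (2.0 + 3.0 + 7.0) / 3 = 4.0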

static char hex_char[] = "0123456789abcdef";

string PrintMemory(const char* ptr, size_t n) {
  string ret;
  ret.resize(n * 3);
  for (size_t i = 0; i < n; ++i) {
    ret[i * 3] = ' ';
    // Cast to unsigned char so bytes >= 0x80 don't produce a negative index
    // when char is signed.
    const unsigned char byte = static_cast<unsigned char>(ptr[i]);
    ret[i * 3 + 1] = hex_char[byte >> 4];
    ret[i * 3 + 2] = hex_char[byte & 0xf];
  }
  return ret;
}
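
// Illustrative output (a sketch; the input bytes below are hypothetical): each
// byte is rendered as a space followed by two lowercase hex digits.
//   PrintMemory("\x01\xab", 2);  // -> " 01 ab"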

string SliceDebugString(const TensorShape& shape, const int64_t flat) {
  // Special case rank 0 and 1
  const int dims = shape.dims();
  if (dims == 0) return "";
  if (dims == 1) return strings::StrCat("[", flat, "]");

  // Compute strides
  gtl::InlinedVector<int64_t, 32> strides(dims);
  strides.back() = 1;
  for (int i = dims - 2; i >= 0; i--) {
    strides[i] = strides[i + 1] * shape.dim_size(i + 1);
  }

  // Unflatten index
  int64_t left = flat;
  string result;
  for (int i = 0; i < dims; i++) {
    strings::StrAppend(&result, i ? "," : "[", left / strides[i]);
    left %= strides[i];
  }
  strings::StrAppend(&result, "]");
  return result;
}
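
// Illustrative example (a sketch; the shape and flat index are hypothetical):
// for a TensorShape of [3, 4] the strides are {4, 1}, so
//   SliceDebugString(TensorShape({3, 4}), 5);  // -> "[1,1]"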

bool IsMKLEnabled() {
#ifndef INTEL_MKL
  return false;
#endif  // !INTEL_MKL
  static absl::once_flag once;
#ifdef ENABLE_MKL
  // Keeping TF_DISABLE_MKL env variable for legacy reasons.
  static bool oneDNN_disabled = false;
  absl::call_once(once, [&] {
    TF_CHECK_OK(ReadBoolFromEnvVar("TF_DISABLE_MKL", false, &oneDNN_disabled));
    if (oneDNN_disabled) VLOG(2) << "TF-MKL: Disabling oneDNN";
  });
  return (!oneDNN_disabled);
#else
  static bool oneDNN_enabled = false;
  absl::call_once(once, [&] {
    TF_CHECK_OK(
        ReadBoolFromEnvVar("TF_ENABLE_ONEDNN_OPTS", false, &oneDNN_enabled));
    if (oneDNN_enabled) {
      // Warn that this is not tested with GPU if there are GPUs available.
      std::vector<std::string> devices;
      Status s = DeviceFactory::ListAllPhysicalDevices(&devices);
      std::string gpu_message = "";
      for (const auto& device : devices) {
        if (device.find("GPU") != std::string::npos) {
          gpu_message =
              "We do NOT recommend turning them on with GPUs in the system. ";
          break;
        }
      }
      LOG(INFO) << "Experimental oneDNN custom operations are on. "
                << gpu_message
                << "To turn them off, set the environment variable "
                   "`TF_ENABLE_ONEDNN_OPTS=0`.";
    }
  });
  return oneDNN_enabled;
#endif  // ENABLE_MKL
}
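
// Summary of the configuration knobs above (informational comment, not
// normative): builds without INTEL_MKL always report false; ENABLE_MKL builds
// honor the legacy TF_DISABLE_MKL variable; other INTEL_MKL builds are off by
// default and opt in via the environment, e.g.
//   TF_ENABLE_ONEDNN_OPTS=1 ./my_tf_program  // hypothetical command line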

}  // namespace tensorflow