• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/* Copyright 2016 The TensorFlow Authors All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// TFProf representation of a Tensor's value.
// 1. A multi-dimensional tensor is flattened in row-major order and stored
//    in the proto.
// 2. Integers are up-cast to int64; floats are up-cast to double. Strings
//    are not supported by the TensorFlow CheckPointReader library, though
//    they are supported in the current code.
21 
22 #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
23 #define TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
24 
25 #include <typeinfo>
26 
27 #include "tensorflow/core/framework/tensor.h"
28 #include "tensorflow/core/lib/strings/numbers.h"
29 #include "tensorflow/core/lib/strings/strcat.h"
30 #include "tensorflow/core/lib/strings/stringprintf.h"
31 #include "tensorflow/core/profiler/tfprof_output.pb.h"
32 
33 namespace tensorflow {
34 namespace tfprof {
35 
36 class TFProfTensor {
37  public:
TFProfTensor(std::unique_ptr<Tensor> tensor)38   explicit TFProfTensor(std::unique_ptr<Tensor> tensor)
39       : tensor_(std::move(tensor)) {
40     Build();
41   }
42 
43   // If pointers are provided, they are filled by the method.
44   void Display(string* formatted_str, TFProfTensorProto* tfprof_tensor_pb);
45 
46  private:
47   // Max length of tensor value displayed to CLI.
48   const int64 kTFProfTenosrMaxDisplayLen = 10000;
49   // Max length after which a latency warning will be printed.
50   const int64 kTFProfTensorMaxWarnLen = 100000;
51 
52   void Build();
53 
54   template <typename T>
AddValue(const T & value,TFProfTensorProto * dim)55   bool AddValue(const T& value, TFProfTensorProto* dim) {
56     std::ostringstream sstream;
57     sstream << value;
58     if (typeid(value) == typeid(double)) {
59       double double_val;
60       CHECK(strings::safe_strtod(sstream.str().c_str(), &double_val));
61       dim->add_value_double(double_val);
62       formatted_str_ += strings::Printf(
63           "%.2f ", dim->value_double(dim->value_double_size() - 1));
64     } else if (typeid(value) == typeid(int64)) {
65       int64 int64_val;
66       CHECK(strings::safe_strto64(sstream.str().c_str(), &int64_val));
67       dim->add_value_int64(int64_val);
68       formatted_str_ += strings::Printf(
69           "%lld ",
70           static_cast<int64>(dim->value_int64(dim->value_int64_size() - 1)));
71     } else if (typeid(value) == typeid(string)) {
72       dim->add_value_str(sstream.str());
73       formatted_str_ =
74           strings::StrCat(formatted_str_, "'",
75                           dim->value_str(dim->value_str_size() - 1) + "' ");
76     } else {
77       CHECK(false) << "Unsupported type: " << typeid(value).name();
78     }
79   }
80 
81   // It assumes the flatten values are stored in row-major, which is mentioned
82   // indirectly at various places:
83   // TODO(xpan): Further verifying it.
84   template <typename T>
BuildOutput(int64 start,int depth,const std::vector<T> & values,TFProfTensorProto * dim)85   int64 BuildOutput(int64 start, int depth, const std::vector<T>& values,
86                     TFProfTensorProto* dim) {
87     formatted_str_ += "[";
88     int64 nstart = start;
89     if (tensor_->dims() == 0 && values.size() == 1) {
90       std::ostringstream sstream;
91       sstream << values[nstart];
92 
93       if (typeid(values[nstart]) == typeid(double)) {
94         double double_val;
95         CHECK(strings::safe_strtod(sstream.str().c_str(), &double_val));
96         dim->add_value_double(double_val);
97         formatted_str_ += strings::Printf(
98             "%.2f ", dim->value_double(dim->value_double_size() - 1));
99       } else if (typeid(values[nstart]) == typeid(int64)) {
100         int64 int64_val;
101         CHECK(strings::safe_strto64(sstream.str().c_str(), &int64_val));
102         dim->add_value_int64(int64_val);
103         formatted_str_ += strings::Printf(
104             "%lld ",
105             static_cast<int64>(dim->value_int64(dim->value_int64_size() - 1)));
106       } else if (typeid(values[nstart]) == typeid(string)) {
107         dim->add_value_str(sstream.str());
108         formatted_str_ =
109             strings::StrCat(formatted_str_, "'",
110                             dim->value_str(dim->value_str_size() - 1) + "' ");
111       } else {
112         CHECK(false) << "Unsupported type: " << typeid(values[nstart]).name();
113       }
114     } else {
115       for (int i = 0; i < tensor_->dim_size(depth); i++) {
116         // Last dimension, pull the values.
117         if (depth == tensor_->dims() - 1) {
118           std::ostringstream sstream;
119           sstream << values[nstart];
120 
121           if (typeid(values[nstart]) == typeid(double)) {
122             double double_val;
123             CHECK(strings::safe_strtod(sstream.str().c_str(), &double_val));
124             dim->add_value_double(double_val);
125             formatted_str_ += strings::Printf(
126                 "%.2f ", dim->value_double(dim->value_double_size() - 1));
127           } else if (typeid(values[nstart]) == typeid(int64)) {
128             int64 int64_val;
129             CHECK(strings::safe_strto64(sstream.str().c_str(), &int64_val));
130             dim->add_value_int64(int64_val);
131             formatted_str_ += strings::Printf(
132                 "%lld ", static_cast<int64>(
133                              dim->value_int64(dim->value_int64_size() - 1)));
134           } else if (typeid(values[nstart]) == typeid(string)) {
135             dim->add_value_str(sstream.str());
136             formatted_str_ = strings::StrCat(
137                 formatted_str_, "'",
138                 dim->value_str(dim->value_str_size() - 1) + "' ");
139           } else {
140             CHECK(false) << "Unsupported type: "
141                          << typeid(values[nstart]).name();
142           }
143           ++nstart;
144         } else {
145           // Not-last dimension. Drill deeper.
146           nstart = BuildOutput<T>(nstart, depth + 1, values, dim);
147         }
148       }
149     }
150     if (formatted_str_.length() > kTFProfTenosrMaxDisplayLen) {
151       formatted_str_ = formatted_str_.substr(0, kTFProfTenosrMaxDisplayLen);
152     }
153     formatted_str_ += "],\n";
154     return nstart;
155   }
156 
157   template <typename T, typename U>
GetValueVec(std::vector<U> * value_vec)158   void GetValueVec(std::vector<U>* value_vec) {
159     // TODO(xpan): Address the huge tensor problem.
160     if (tensor_->NumElements() > kTFProfTensorMaxWarnLen) {
161       fprintf(stderr, "Showing huge tensor, the tool might halt...\n");
162     }
163     auto values = tensor_->flat<T>();
164     for (int64 i = 0; i < tensor_->NumElements(); i++) {
165       value_vec->push_back(static_cast<U>(values(i)));
166     }
167   }
168 
169   TFProfTensorProto tfprof_tensor_pb_;
170   std::unique_ptr<Tensor> tensor_;
171   string formatted_str_;
172 };
173 }  // namespace tfprof
174 }  // namespace tensorflow
175 
176 #endif  // TENSORFLOW_CORE_PROFILER_INTERNAL_TFPROF_TENSOR_H_
177