/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_TFLITE_MODEL_H_
#define TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_TFLITE_MODEL_H_

#include <algorithm>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/lite/model.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"

namespace tflite {
namespace benchmark {

// Benchmarks a TFLite model by running the TFLite interpreter.
class BenchmarkTfLiteModel : public BenchmarkModel {
 public:
  struct InputLayerInfo {
    InputLayerInfo() : has_value_range(false) {}

    std::string name;
    std::vector<int> shape;

    // The input value is randomly generated when benchmarking the NN model.
    // However, the NN model might require the value to be limited to a
    // certain range [low, high] for this particular input layer. For
    // simplicity, only integer values are supported for now (see the sketch
    // after this struct).
    bool has_value_range;
    int low;
    int high;

    // The input value will be loaded from 'input_file_path' INSTEAD OF being
    // randomly generated. Note that the input file will be opened in binary
    // mode.
    std::string input_file_path;
  };
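
  // A minimal sketch of filling in InputLayerInfo for a uint8 image input
  // constrained to [0, 255]; the layer name and shape are hypothetical:
  //
  //   InputLayerInfo info;
  //   info.name = "input_1";
  //   info.shape = {1, 224, 224, 3};
  //   info.has_value_range = true;
  //   info.low = 0;
  //   info.high = 255;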

  explicit BenchmarkTfLiteModel(BenchmarkParams params = DefaultParams());
  ~BenchmarkTfLiteModel() override;

  std::vector<Flag> GetFlags() override;
  void LogParams() override;
  TfLiteStatus ValidateParams() override;
  uint64_t ComputeInputBytes() override;
  TfLiteStatus Init() override;
  TfLiteStatus RunImpl() override;
  static BenchmarkParams DefaultParams();

 protected:
  TfLiteStatus PrepareInputData() override;
  TfLiteStatus ResetInputsAndOutputs() override;

  int64_t MayGetModelFileSize() override;

  virtual TfLiteStatus LoadModel();

  // Allow subclasses to create a customized Op resolver during init.
  virtual std::unique_ptr<tflite::OpResolver> GetOpResolver() const;
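  //
  // For example, a subclass might return a resolver extended with a custom op
  // (a hypothetical sketch; RegisterMyOp() is not part of this code base):
  //
  //   std::unique_ptr<tflite::OpResolver> GetOpResolver() const override {
  //     auto resolver =
  //         std::make_unique<tflite::ops::builtin::BuiltinOpResolver>();
  //     resolver->AddCustom("MyOp", RegisterMyOp());
  //     return resolver;
  //   }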

  // Allow subclasses to initialize a customized tflite interpreter.
  virtual TfLiteStatus InitInterpreter();

  // Create a BenchmarkListener that's specifically for TFLite profiling if
  // necessary.
  virtual std::unique_ptr<BenchmarkListener> MayCreateProfilingListener() const;

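  // Frees any input tensor data pre-allocated during PrepareInputData().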
  void CleanUp();

  std::unique_ptr<tflite::FlatBufferModel> model_;
  std::unique_ptr<tflite::Interpreter> interpreter_;
  std::unique_ptr<tflite::ExternalCpuBackendContext> external_context_;

 private:
  // Implements type erasure with a unique_ptr with a custom deleter.
  using VoidUniquePtr = std::unique_ptr<void, void (*)(void*)>;

  struct InputTensorData {
    InputTensorData() : data(nullptr, nullptr) {}

    VoidUniquePtr data;
    size_t bytes;
  };

  template <typename T, typename Distribution>
  inline InputTensorData CreateInputTensorData(int num_elements,
                                               Distribution distribution) {
    InputTensorData tmp;
    tmp.bytes = sizeof(T) * num_elements;
    T* raw = new T[num_elements];
    std::generate_n(raw, num_elements, [&]() {
      return static_cast<T>(distribution(random_engine_));
    });
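    // The buffer is stored as a void*, so the deleter restores the static
    // type before delete[] to run the correct array deallocation.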
    tmp.data = VoidUniquePtr(static_cast<void*>(raw),
                             [](void* ptr) { delete[] static_cast<T*>(ptr); });
    return tmp;
  }

  InputTensorData CreateRandomTensorData(const TfLiteTensor& t,
                                         const InputLayerInfo* layer_info);

  InputTensorData LoadInputTensorData(const TfLiteTensor& t,
                                      const std::string& input_file_path);

  void AddOwnedListener(std::unique_ptr<BenchmarkListener> listener) {
    if (listener == nullptr) return;
    owned_listeners_.emplace_back(std::move(listener));
    AddListener(owned_listeners_.back().get());
  }

  std::vector<InputLayerInfo> inputs_;
  std::vector<InputTensorData> inputs_data_;
  std::vector<std::unique_ptr<BenchmarkListener>> owned_listeners_;
  std::mt19937 random_engine_;
  std::vector<Interpreter::TfLiteDelegatePtr> owned_delegates_;
  // Always TFLITE_LOG the benchmark result.
  BenchmarkLoggingListener log_output_;
};
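
// A minimal usage sketch, assuming BenchmarkModel::Run(argc, argv) parses the
// command-line flags exposed via GetFlags():
//
//   int main(int argc, char** argv) {
//     tflite::benchmark::BenchmarkTfLiteModel benchmark;
//     return benchmark.Run(argc, argv) == kTfLiteOk ? 0 : 1;
//   }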

}  // namespace benchmark
}  // namespace tflite

#endif  // TENSORFLOW_LITE_TOOLS_BENCHMARK_BENCHMARK_TFLITE_MODEL_H_