1 /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #include "tensorflow/lite/testing/kernel_test/input_generator.h"
16
17 #include <fstream>
18 #include <limits>
19 #include <random>
20
21 #include "tensorflow/lite/c/common.h"
22 #include "tensorflow/lite/kernels/register.h"
23 #include "tensorflow/lite/testing/join.h"
24 #include "tensorflow/lite/testing/split.h"
25
26 namespace tflite {
27 namespace testing {
28
29 namespace {
30
31 template <typename T>
GenerateRandomTensor(TfLiteIntArray * dims,const std::function<T (int)> & random_func)32 std::vector<T> GenerateRandomTensor(TfLiteIntArray* dims,
33 const std::function<T(int)>& random_func) {
34 int64_t num_elements = 1;
35 for (int i = 0; i < dims->size; i++) {
36 num_elements *= dims->data[i];
37 }
38
39 std::vector<T> result(num_elements);
40 for (int i = 0; i < num_elements; i++) {
41 result[i] = random_func(i);
42 }
43 return result;
44 }
45
46 template <typename T>
GenerateUniform(TfLiteIntArray * dims,float min,float max)47 std::vector<T> GenerateUniform(TfLiteIntArray* dims, float min, float max) {
48 auto random_float = [](float min, float max) {
49 // TODO(yunluli): Change seed for each invocation if needed.
50 // Used rand() instead of rand_r() here to make it runnable on android.
51 return min + (max - min) * static_cast<float>(rand()) / RAND_MAX;
52 };
53
54 std::function<T(int)> random_t = [&](int) {
55 return static_cast<T>(random_float(min, max));
56 };
57 std::vector<T> data = GenerateRandomTensor(dims, random_t);
58 return data;
59 }
60
61 template <typename T>
GenerateGaussian(TfLiteIntArray * dims,float min,float max)62 std::vector<T> GenerateGaussian(TfLiteIntArray* dims, float min, float max) {
63 auto random_float = [](float min, float max) {
64 static std::default_random_engine generator;
65 // We generate a float number within [0, 1) following a mormal distribution
66 // with mean = 0.5 and stddev = 1/3, and use it to scale the final random
67 // number into the desired range.
68 static std::normal_distribution<double> distribution(0.5, 1.0 / 3);
69 auto rand_n = distribution(generator);
70 while (rand_n < 0 || rand_n >= 1) {
71 rand_n = distribution(generator);
72 }
73
74 return min + (max - min) * static_cast<float>(rand_n);
75 };
76
77 std::function<T(int)> random_t = [&](int) {
78 return static_cast<T>(random_float(min, max));
79 };
80 std::vector<T> data = GenerateRandomTensor(dims, random_t);
81 return data;
82 }
83
84 } // namespace
85
LoadModel(const string & model_dir)86 TfLiteStatus InputGenerator::LoadModel(const string& model_dir) {
87 model_ = FlatBufferModel::BuildFromFile(model_dir.c_str());
88 if (!model_) {
89 fprintf(stderr, "Cannot load model %s", model_dir.c_str());
90 return kTfLiteError;
91 }
92
93 ::tflite::ops::builtin::BuiltinOpResolver builtin_ops;
94 InterpreterBuilder(*model_, builtin_ops)(&interpreter_);
95 if (!interpreter_) {
96 fprintf(stderr, "Failed to build interpreter.");
97 return kTfLiteError;
98 }
99
100 return kTfLiteOk;
101 }
102
ReadInputsFromFile(const string & filename)103 TfLiteStatus InputGenerator::ReadInputsFromFile(const string& filename) {
104 if (filename.empty()) {
105 fprintf(stderr, "Empty input file name.");
106 return kTfLiteError;
107 }
108
109 std::ifstream input_file(filename);
110 string input;
111 while (std::getline(input_file, input, '\n')) {
112 inputs_.push_back(input);
113 }
114 input_file.close();
115 return kTfLiteOk;
116 }
117
WriteInputsToFile(const string & filename)118 TfLiteStatus InputGenerator::WriteInputsToFile(const string& filename) {
119 if (filename.empty()) {
120 fprintf(stderr, "Empty input file name.");
121 return kTfLiteError;
122 }
123
124 std::ofstream output_file;
125 output_file.open(filename, std::fstream::out | std::fstream::trunc);
126 if (!output_file) {
127 fprintf(stderr, "Failed to open output file %s.", filename.c_str());
128 return kTfLiteError;
129 }
130
131 for (const auto& input : inputs_) {
132 output_file << input << "\n";
133 }
134 output_file.close();
135
136 return kTfLiteOk;
137 }
138
// TODO(yunluli): Support more tensor types when needed.
// Generates one comma-joined line of random data per model input tensor and
// appends it to inputs_. `distribution` must be "UNIFORM" or "GAUSSIAN";
// any other value is an error. Int8/uint8 tensors span their full value
// range; float32 tensors are drawn from [-1, 1]. An unsupported tensor type
// is reported to stderr but does not fail the call (no line is appended for
// that tensor).
TfLiteStatus InputGenerator::GenerateInput(const string& distribution) {
  auto input_tensor_ids = interpreter_->inputs();
  for (auto id : input_tensor_ids) {
    auto* tensor = interpreter_->tensor(id);
    if (distribution == "UNIFORM") {
      switch (tensor->type) {
        case kTfLiteInt8: {
          auto data = GenerateUniform<int8_t>(
              tensor->dims, std::numeric_limits<int8_t>::min(),
              std::numeric_limits<int8_t>::max());
          // Join() serializes integer types; JoinDefault() is used for floats
          // below to keep their default precision.
          inputs_.push_back(Join(data.data(), data.size(), ","));
          break;
        }
        case kTfLiteUInt8: {
          auto data = GenerateUniform<uint8_t>(
              tensor->dims, std::numeric_limits<uint8_t>::min(),
              std::numeric_limits<uint8_t>::max());
          inputs_.push_back(Join(data.data(), data.size(), ","));
          break;
        }
        case kTfLiteFloat32: {
          auto data = GenerateUniform<float>(tensor->dims, -1, 1);
          inputs_.push_back(JoinDefault(data.data(), data.size(), ","));
          break;
        }
        default:
          // Best-effort: skip unsupported tensor types rather than failing.
          fprintf(stderr, "Unsupported input tensor type %s.",
                  TfLiteTypeGetName(tensor->type));
          break;
      }
    } else if (distribution == "GAUSSIAN") {
      // Mirrors the UNIFORM branch above, with Gaussian sampling instead.
      switch (tensor->type) {
        case kTfLiteInt8: {
          auto data = GenerateGaussian<int8_t>(
              tensor->dims, std::numeric_limits<int8_t>::min(),
              std::numeric_limits<int8_t>::max());
          inputs_.push_back(Join(data.data(), data.size(), ","));
          break;
        }
        case kTfLiteUInt8: {
          auto data = GenerateGaussian<uint8_t>(
              tensor->dims, std::numeric_limits<uint8_t>::min(),
              std::numeric_limits<uint8_t>::max());
          inputs_.push_back(Join(data.data(), data.size(), ","));
          break;
        }
        case kTfLiteFloat32: {
          auto data = GenerateGaussian<float>(tensor->dims, -1, 1);
          inputs_.push_back(JoinDefault(data.data(), data.size(), ","));
          break;
        }
        default:
          fprintf(stderr, "Unsupported input tensor type %s.",
                  TfLiteTypeGetName(tensor->type));
          break;
      }
    } else {
      fprintf(stderr, "Unsupported distribution %s.", distribution.c_str());
      return kTfLiteError;
    }
  }

  return kTfLiteOk;
}
204
// Returns a copy of all input lines generated or read so far.
std::vector<string> InputGenerator::GetInputs() { return inputs_; }
206
207 } // namespace testing
208 } // namespace tflite
209