/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/c/c_api.h"

#include <stdarg.h>
#include <stdint.h>

#include <array>
#include <fstream>
#include <vector>

#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/testing/util.h"

namespace {

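// The C API must report a non-empty version string.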
TEST(CAPI, Version) { EXPECT_STRNE("", TfLiteVersion()); }

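// End-to-end smoke test: load a model, configure and build an interpreter,
// resize and fill the input tensor, invoke, and check the output tensor.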
TEST(CApiSimple, Smoke) {
  TfLiteModel* model =
      TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
  ASSERT_NE(model, nullptr);

  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
  ASSERT_NE(options, nullptr);
  TfLiteInterpreterOptionsSetNumThreads(options, 2);

  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
  ASSERT_NE(interpreter, nullptr);

  // The options/model can be deleted immediately after interpreter creation.
  TfLiteInterpreterOptionsDelete(options);
  TfLiteModelDelete(model);

  ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
  ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
  ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);

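  // Resize the sole input to a 1-D tensor with two elements, re-allocate
  // tensor buffers, then inspect the input tensor's metadata.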
  std::array<int, 1> input_dims = {2};
  ASSERT_EQ(TfLiteInterpreterResizeInputTensor(
                interpreter, 0, input_dims.data(), input_dims.size()),
            kTfLiteOk);
  ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);

  TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
  ASSERT_NE(input_tensor, nullptr);
  EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
  EXPECT_EQ(TfLiteTensorNumDims(input_tensor), 1);
  EXPECT_EQ(TfLiteTensorDim(input_tensor, 0), 2);
  EXPECT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2);
  EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
  EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");

  TfLiteQuantizationParams input_params =
      TfLiteTensorQuantizationParams(input_tensor);
  EXPECT_EQ(input_params.scale, 0.f);
  EXPECT_EQ(input_params.zero_point, 0);

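  // Copy input data into the tensor, run inference, and inspect the output.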
  std::array<float, 2> input = {1.f, 3.f};
  ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
                                       input.size() * sizeof(float)),
            kTfLiteOk);

  ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);

  const TfLiteTensor* output_tensor =
      TfLiteInterpreterGetOutputTensor(interpreter, 0);
  ASSERT_NE(output_tensor, nullptr);
  EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
  EXPECT_EQ(TfLiteTensorNumDims(output_tensor), 1);
  EXPECT_EQ(TfLiteTensorDim(output_tensor, 0), 2);
  EXPECT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2);
  EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
  EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");

  TfLiteQuantizationParams output_params =
      TfLiteTensorQuantizationParams(output_tensor);
  EXPECT_EQ(output_params.scale, 0.f);
  EXPECT_EQ(output_params.zero_point, 0);

  std::array<float, 2> output;
  ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
                                     output.size() * sizeof(float)),
            kTfLiteOk);
  EXPECT_EQ(output[0], 3.f);
  EXPECT_EQ(output[1], 9.f);

  TfLiteInterpreterDelete(interpreter);
}

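// Verifies that scale/zero-point quantization parameters are exposed through
// the C API for a uint8-quantized model.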
TEST(CApiSimple, QuantizationParams) {
  TfLiteModel* model = TfLiteModelCreateFromFile(
      "tensorflow/lite/testdata/add_quantized.bin");
  ASSERT_NE(model, nullptr);

  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, nullptr);
  ASSERT_NE(interpreter, nullptr);

  TfLiteModelDelete(model);

  const std::array<int, 1> input_dims = {2};
  ASSERT_EQ(TfLiteInterpreterResizeInputTensor(
                interpreter, 0, input_dims.data(), input_dims.size()),
            kTfLiteOk);
  ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);

  TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
  ASSERT_NE(input_tensor, nullptr);
  EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteUInt8);
  EXPECT_EQ(TfLiteTensorNumDims(input_tensor), 1);
  EXPECT_EQ(TfLiteTensorDim(input_tensor, 0), 2);

  TfLiteQuantizationParams input_params =
      TfLiteTensorQuantizationParams(input_tensor);
  EXPECT_EQ(input_params.scale, 0.003922f);
  EXPECT_EQ(input_params.zero_point, 0);

  const std::array<uint8_t, 2> input = {1, 3};
  ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
                                       input.size() * sizeof(uint8_t)),
            kTfLiteOk);

  ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);

  const TfLiteTensor* output_tensor =
      TfLiteInterpreterGetOutputTensor(interpreter, 0);
  ASSERT_NE(output_tensor, nullptr);

  TfLiteQuantizationParams output_params =
      TfLiteTensorQuantizationParams(output_tensor);
  EXPECT_EQ(output_params.scale, 0.003922f);
  EXPECT_EQ(output_params.zero_point, 0);

  std::array<uint8_t, 2> output;
  ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
                                     output.size() * sizeof(uint8_t)),
            kTfLiteOk);
  EXPECT_EQ(output[0], 3);
  EXPECT_EQ(output[1], 9);

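  // Dequantize via the affine mapping:
  //   real = scale * (quantized - zero_point).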
  const float dequantizedOutput0 =
      output_params.scale * (output[0] - output_params.zero_point);
  const float dequantizedOutput1 =
      output_params.scale * (output[1] - output_params.zero_point);
  EXPECT_EQ(dequantizedOutput0, 0.011766f);
  EXPECT_EQ(dequantizedOutput1, 0.035298f);

  TfLiteInterpreterDelete(interpreter);
}

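// Installs a no-op delegate and checks that its Prepare callback runs when
// the interpreter is created.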
TEST(CApiSimple, Delegate) {
  TfLiteModel* model =
      TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");

  // Create and install a delegate instance.
  bool delegate_prepared = false;
  TfLiteDelegate delegate = TfLiteDelegateCreate();
  delegate.data_ = &delegate_prepared;
  delegate.Prepare = [](TfLiteContext* context, TfLiteDelegate* delegate) {
    *static_cast<bool*>(delegate->data_) = true;
    return kTfLiteOk;
  };
  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
  TfLiteInterpreterOptionsAddDelegate(options, &delegate);
  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);

  // The delegate should have been applied.
  EXPECT_TRUE(delegate_prepared);

  // Subsequent execution should behave properly (the delegate is a no-op).
  TfLiteInterpreterOptionsDelete(options);
  TfLiteModelDelete(model);
  EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
  TfLiteInterpreterDelete(interpreter);
}

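// If delegate preparation fails, interpreter creation must fail rather than
// return a partially-initialized interpreter.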
TEST(CApiSimple, DelegateFails) {
  TfLiteModel* model =
      TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");

  // Create and install a delegate instance whose Prepare callback always
  // fails.
  TfLiteDelegate delegate = TfLiteDelegateCreate();
  delegate.Prepare = [](TfLiteContext* context, TfLiteDelegate* delegate) {
    return kTfLiteError;
  };
  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
  TfLiteInterpreterOptionsAddDelegate(options, &delegate);
  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);

  // Interpreter creation should fail because delegate preparation failed.
  EXPECT_EQ(nullptr, interpreter);

  TfLiteInterpreterOptionsDelete(options);
  TfLiteModelDelete(model);
}

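// Errors raised by the interpreter must be routed to the caller-supplied
// error reporter.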
TEST(CApiSimple, ErrorReporter) {
  TfLiteModel* model =
      TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();

  // Install a custom error reporter into the interpreter by way of options.
  tflite::TestErrorReporter reporter;
  TfLiteInterpreterOptionsSetErrorReporter(
      options,
      [](void* user_data, const char* format, va_list args) {
        reinterpret_cast<tflite::TestErrorReporter*>(user_data)->Report(format,
                                                                        args);
      },
      &reporter);
  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);

  // The options/model can be deleted immediately after interpreter creation.
  TfLiteInterpreterOptionsDelete(options);
  TfLiteModelDelete(model);

  // Invoke the interpreter before tensor allocation.
  EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteError);

  // The error should propagate to the custom error reporter.
  EXPECT_EQ(reporter.error_messages(),
            "Invoke called on model that is not ready.");
  EXPECT_EQ(reporter.num_calls(), 1);

  TfLiteInterpreterDelete(interpreter);
}

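// Creating a model from a valid in-memory flatbuffer should succeed.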
TEST(CApiSimple, ValidModel) {
  // Read the model file into an in-memory buffer. Open in binary mode: the
  // flatbuffer is raw bytes, not text.
  std::ifstream model_file("tensorflow/lite/testdata/add.bin",
                           std::ios::binary);

  model_file.seekg(0, std::ios_base::end);
  std::vector<char> model_buffer(model_file.tellg());

  model_file.seekg(0, std::ios_base::beg);
  model_file.read(model_buffer.data(), model_buffer.size());

  TfLiteModel* model =
      TfLiteModelCreate(model_buffer.data(), model_buffer.size());
  ASSERT_NE(model, nullptr);
  TfLiteModelDelete(model);
}

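// Loading the same valid model directly from a file should also succeed.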
TEST(CApiSimple, ValidModelFromFile) {
  TfLiteModel* model =
      TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
  ASSERT_NE(model, nullptr);
  TfLiteModelDelete(model);
}

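// Garbage bytes must be rejected: model creation returns nullptr.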
TEST(CApiSimple, InvalidModel) {
  std::vector<char> invalid_model(20, 'c');
  TfLiteModel* model =
      TfLiteModelCreate(invalid_model.data(), invalid_model.size());
  ASSERT_EQ(model, nullptr);
}

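// A nonexistent file path must likewise yield a null model.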
TEST(CApiSimple, InvalidModelFromFile) {
  TfLiteModel* model = TfLiteModelCreateFromFile("invalid/path/foo.tflite");
  ASSERT_EQ(model, nullptr);
}

}  // namespace