• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #include <string>
16 #include <vector>
17 
18 #include "flatbuffers/flatbuffers.h"
19 #include "flatbuffers/util.h"
20 #include <gmock/gmock.h>
21 #include <gtest/gtest.h>
22 #include "tensorflow/core/framework/numeric_types.h"
23 #include "tensorflow/lite/allocation.h"
24 #include "tensorflow/lite/error_reporter.h"
25 #include "tensorflow/lite/op_resolver.h"
26 #include "tensorflow/lite/schema/schema_generated.h"
27 #include "tensorflow/lite/testing/util.h"
28 #include "tensorflow/lite/tools/verifier.h"
29 #include "tensorflow/lite/version.h"
30 
31 namespace tflite {
32 
33 using flatbuffers::FlatBufferBuilder;
34 using flatbuffers::Offset;
35 
36 class MockErrorReporter : public ErrorReporter {
37  public:
MockErrorReporter()38   MockErrorReporter() : buffer_size_(0) {}
Report(const char * format,va_list args)39   int Report(const char* format, va_list args) override {
40     buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
41     return buffer_size_;
42   }
GetBufferSize()43   int GetBufferSize() { return buffer_size_; }
44 
GetAsString() const45   string GetAsString() const { return string(buffer_, buffer_size_); }
46 
47  private:
48   static constexpr int kBufferSize = 256;
49   char buffer_[kBufferSize];
50   int buffer_size_;
51 };
52 
53 // Build single subgraph model.
54 class TfLiteFlatbufferModelBuilder {
55  public:
TfLiteFlatbufferModelBuilder()56   TfLiteFlatbufferModelBuilder() {
57     buffers_.push_back(
58         CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
59   }
60 
TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator> & builtin_ops,const std::vector<std::string> & custom_ops)61   TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
62                                const std::vector<std::string>& custom_ops) {
63     buffers_.push_back(
64         CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
65 
66     for (const auto& iter : builtin_ops) {
67       resolver_.AddBuiltin(iter, &fake_op_);
68     }
69     for (const auto& iter : custom_ops) {
70       resolver_.AddCustom(iter.data(), &fake_op_);
71     }
72   }
73 
AddTensor(const std::vector<int> & shape,tflite::TensorType type,const std::vector<uint8_t> & buffer,const char * name,const bool is_variable=false)74   void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
75                  const std::vector<uint8_t>& buffer, const char* name,
76                  const bool is_variable = false) {
77     int buffer_index = 0;
78     if (!buffer.empty()) {
79       buffer_index = buffers_.size();
80       buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
81     }
82     if (shape.empty()) {
83       tensors_.push_back(CreateTensorDirect(builder_, /*shape=*/nullptr, type,
84                                             buffer_index, name,
85                                             /*quantization=*/0, is_variable));
86       return;
87     }
88     tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
89                                           name, /*quantization=*/0,
90                                           is_variable));
91   }
92 
AddOperator(const std::vector<int32_t> & inputs,const std::vector<int32_t> & outputs,tflite::BuiltinOperator builtin_op,const char * custom_op)93   void AddOperator(const std::vector<int32_t>& inputs,
94                    const std::vector<int32_t>& outputs,
95                    tflite::BuiltinOperator builtin_op, const char* custom_op) {
96     operator_codes_.push_back(
97         CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
98     operators_.push_back(CreateOperator(
99         builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
100         builder_.CreateVector(outputs), BuiltinOptions_NONE,
101         /*builtin_options=*/0,
102         /*custom_options=*/0, tflite::CustomOptionsFormat_FLEXBUFFERS));
103   }
104 
FinishModel(const std::vector<int32_t> & inputs,const std::vector<int32_t> & outputs)105   void FinishModel(const std::vector<int32_t>& inputs,
106                    const std::vector<int32_t>& outputs) {
107     auto subgraph = std::vector<Offset<SubGraph>>({CreateSubGraph(
108         builder_, builder_.CreateVector(tensors_),
109         builder_.CreateVector(inputs), builder_.CreateVector(outputs),
110         builder_.CreateVector(operators_),
111         builder_.CreateString("test_subgraph"))});
112     auto result = CreateModel(
113         builder_, TFLITE_SCHEMA_VERSION, builder_.CreateVector(operator_codes_),
114         builder_.CreateVector(subgraph), builder_.CreateString("test_model"),
115         builder_.CreateVector(buffers_));
116     tflite::FinishModelBuffer(builder_, result);
117   }
118 
Verify()119   bool Verify() {
120     return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
121                           resolver_, &mock_reporter_);
122   }
123 
GetErrorString()124   string GetErrorString() { return mock_reporter_.GetAsString(); }
125 
126  private:
127   FlatBufferBuilder builder_;
128   MutableOpResolver resolver_;
129   TfLiteRegistration fake_op_;
130   MockErrorReporter mock_reporter_;
131   std::vector<Offset<Operator>> operators_;
132   std::vector<Offset<OperatorCode>> operator_codes_;
133   std::vector<Offset<Tensor>> tensors_;
134   std::vector<Offset<Buffer>> buffers_;
135 };
136 
// A model with no subgraphs must be rejected.
TEST(VerifyModel, TestEmptyModel) {
  FlatBufferBuilder fbb;
  auto model_offset = CreateModel(fbb, /*version=*/TFLITE_SCHEMA_VERSION,
                                  /*operator_codes=*/0, /*subgraphs=*/0,
                                  /*description=*/0, /*buffers=*/0);
  ::tflite::FinishModelBuffer(fbb, model_offset);

  MockErrorReporter reporter;
  ASSERT_FALSE(Verify(fbb.GetBufferPointer(), fbb.GetSize(),
                      MutableOpResolver{}, &reporter));
  EXPECT_THAT(reporter.GetAsString(),
              ::testing::ContainsRegex("Missing 'subgraphs' section."));
}
150 
// A tensor with an empty shape and no data ("empty_vector") is still valid.
TEST(VerifyModel, TestEmptyVector) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  model_builder.AddTensor({}, TensorType_UINT8, {}, "empty_vector");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  model_builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  model_builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "test");
  model_builder.FinishModel({0, 1}, {3});
  ASSERT_TRUE(model_builder.Verify());
}
164 
// A well-formed single-op model passes verification with no error output.
TEST(VerifyModel, TestSimpleModel) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  model_builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  model_builder.FinishModel({0, 1}, {2});
  ASSERT_TRUE(model_builder.Verify());
  EXPECT_EQ("", model_builder.GetErrorString());
}
178 
// Arbitrary bytes that are not a flatbuffer must be rejected.
TEST(VerifyModel, TestCorruptedData) {
  const std::string garbage = "123";
  MockErrorReporter reporter;
  ASSERT_FALSE(
      Verify(garbage.data(), garbage.size(), MutableOpResolver{}, &reporter));
  EXPECT_THAT(reporter.GetAsString(),
              ::testing::ContainsRegex("Invalid flatbuffer format"));
}
187 
// Schema version 1 is below TFLITE_SCHEMA_VERSION and must be rejected.
TEST(VerifyModel, TestUnsupportedVersion) {
  FlatBufferBuilder fbb;
  auto model_offset = CreateModel(fbb, /*version=*/1, /*operator_codes=*/0,
                                  /*subgraphs=*/0, /*description=*/0,
                                  /*buffers=*/0);
  ::tflite::FinishModelBuffer(fbb, model_offset);
  MockErrorReporter reporter;
  ASSERT_FALSE(Verify(fbb.GetBufferPointer(), fbb.GetSize(),
                      MutableOpResolver{}, &reporter));
  EXPECT_THAT(reporter.GetAsString(),
              ::testing::ContainsRegex("Invalid model version 1"));
}
199 
// Corrupting the serialized model at any byte position must make verification
// fail.
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
  FlatBufferBuilder fbb;
  auto model_offset = CreateModel(fbb, /*version=*/TFLITE_SCHEMA_VERSION,
                                  /*operator_codes=*/0,
                                  /*subgraphs=*/0, /*description=*/0,
                                  /*buffers=*/0);
  ::tflite::FinishModelBuffer(fbb, model_offset);

  std::string model_content(reinterpret_cast<char*>(fbb.GetBufferPointer()),
                            fbb.GetSize());
  // NOTE(review): the mutations accumulate — earlier bytes are never restored
  // before the next iteration — so later iterations test a multiply-corrupted
  // buffer, not a single byte flip. Confirm this is intended.
  for (size_t i = 0; i < model_content.size(); i++) {
    model_content[i] = (model_content[i] + 137) % 255;
    EXPECT_FALSE(Verify(model_content.data(), model_content.size(),
                        MutableOpResolver{}, DefaultErrorReporter()))
        << "Fail at position: " << i;
  }
}
216 
// A 2x3 uint8 tensor needs 6 bytes; a 4-byte constant buffer is too small.
TEST(VerifyModel, TestIntTensorShapeIsGreaterThanBuffer) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex("Tensor input requires 6 bytes, but is "
                                       "allocated with 4 bytes buffer"));
}
226 
// A 2x1 uint8 tensor needs 2 bytes; a 4-byte constant buffer is a mismatch.
TEST(VerifyModel, TestIntTensorShapeIsSmallerThanBuffer) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor({2, 1}, TensorType_UINT8, {1, 2, 3, 4}, "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex("Tensor input requires 2 bytes, but is "
                                       "allocated with 4 bytes buffer"));
}
236 
// 1024*2048*4096 overflows the byte-count computation; the verifier must
// detect the dimension overflow instead of wrapping around.
TEST(VerifyModel, TestIntTensorShapeOverflow) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor({1024, 2048, 4096}, TensorType_UINT8, {1, 2, 3, 4},
                          "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex("Tensor input dimension overflow"));
}
246 
// Hand-built subgraph whose tensor references buffer index 2 while only one
// buffer exists; verification fails (the reported error is the missing
// 'operators' section, which is checked first).
TEST(VerifyModel, TensorBufferIsNotValid) {
  FlatBufferBuilder fbb;
  std::vector<int> tensor_shape = {2, 3};
  auto tensors = fbb.CreateVector(std::vector<Offset<Tensor>>{
      CreateTensorDirect(fbb, &tensor_shape, TensorType_INT32, /*buffer=*/2,
                         "input", /*quantization=*/0)});
  auto subgraph = std::vector<Offset<SubGraph>>(
      {CreateSubGraph(fbb, tensors, /*inputs=*/0, /*outputs=*/0,
                      /*operators=*/0, fbb.CreateString("Main"))});

  auto buffers = fbb.CreateVector(std::vector<Offset<Buffer>>{
      CreateBuffer(fbb,
                   fbb.CreateVector(std::vector<uint8_t>{1, 2, 3, 4, 5, 6})),
  });

  auto model_offset = CreateModel(fbb, TFLITE_SCHEMA_VERSION,
                                  /*operator_codes=*/0,
                                  fbb.CreateVector(subgraph),
                                  fbb.CreateString("SmartReply"), buffers);

  ::tflite::FinishModelBuffer(fbb, model_offset);
  MockErrorReporter reporter;
  ASSERT_FALSE(Verify(fbb.GetBufferPointer(), fbb.GetSize(),
                      MutableOpResolver{}, &reporter));
  EXPECT_THAT(
      reporter.GetAsString(),
      ::testing::ContainsRegex("Missing 'operators' section in subgraph."));
}
274 
// The leading count word (0x20000000 strings, little-endian) implies a far
// larger buffer than the 18 bytes provided; the size check must fail.
TEST(VerifyModel, StringTensorHasInvalidNumString) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {0x00, 0x00, 0x00, 0x20, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'},
      "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(
      model_builder.GetErrorString(),
      ::testing::ContainsRegex(
          "String tensor input buffer requires at least -2147483640 bytes, "
          "but is allocated with 18 bytes"));
}
289 
// With 2 strings the first offset must be 16 (header size); 12 is invalid.
TEST(VerifyModel, StringTensorOffsetTooSmall) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 12, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'}, "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "String tensor input buffer initial offset must be: 16"));
}
301 
// The last offset (22) points past the end of the 18-byte buffer.
TEST(VerifyModel, StringTensorOffsetOutOfRange) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 22, 0, 0, 0, 'A', 'B'}, "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "String tensor input buffer is invalid: index 2"));
}
313 
// The buffer holds 19 bytes but the final offset claims 18; the last offset
// must equal the total buffer size.
TEST(VerifyModel, StringTensorIsLargerThanRequired) {
  TfLiteFlatbufferModelBuilder model_builder;
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B', 'C'},
      "input");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "String tensor input buffer last offset must be 19"));
}
326 
// Every op in the model is registered with the resolver, so verification
// succeeds.
TEST(VerifyModel, AllOpsAreSupported) {
  TfLiteFlatbufferModelBuilder model_builder({BuiltinOperator_ADD},
                                             {"CustomOp"});
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output2");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  model_builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "CustomOp");
  model_builder.FinishModel({}, {});
  ASSERT_TRUE(model_builder.Verify());
  EXPECT_EQ("", model_builder.GetErrorString());
}
339 
// The model uses ADD but only SUB was registered; the verifier must flag the
// unsupported builtin.
TEST(VerifyModel, UseUnsupportedBuiltinOps) {
  TfLiteFlatbufferModelBuilder model_builder({BuiltinOperator_SUB},
                                             {"CustomOp"});
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(
      model_builder.GetErrorString(),
      ::testing::ContainsRegex("Unsupported builtin op: ADD, version: 1"));
}
352 
// The model references a custom op name that was never registered.
TEST(VerifyModel, UseUnsupportedCustomOps) {
  TfLiteFlatbufferModelBuilder model_builder({BuiltinOperator_ADD}, {"NewOp"});
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM,
                            "Not supported");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_THAT(model_builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "Unsupported custom op: Not supported, version: 1"));
}
365 
// Op input tensor 1 is neither a constant, a subgraph input, nor another op's
// output, so it is never produced.
TEST(VerifyModel, UnpopulatedInputToOp) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  // This tensor will never be populated.
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {}, "invalid_input");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  model_builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  model_builder.AddOperator({1, 2}, {3}, BuiltinOperator_CUSTOM, "test");
  model_builder.FinishModel({0, 2}, {3});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_EQ("Input tensor 1 to op 0 (CUSTOM) is not produced",
            model_builder.GetErrorString());
}
382 
// Two ops writing the same output tensor is reported as a graph cycle.
TEST(VerifyModel, MultipleOpsOutputToSameTensor) {
  TfLiteFlatbufferModelBuilder model_builder({BuiltinOperator_ADD},
                                             {"CustomOp"});
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  model_builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  // This can't output to "output1", since the first operator does that.
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "CustomOp");
  model_builder.FinishModel({}, {});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_EQ(
      "Output tensor 2 to op 1 (CUSTOM) is an output from another op. "
      "There is a cycle in the graph",
      model_builder.GetErrorString());
}
398 
// An op output backed by a constant buffer is invalid.
TEST(VerifyModel, OutputIsAConstantTensor) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  // Output shouldn't be populated with constant value.
  model_builder.AddTensor({2, 3}, TensorType_INT32, {1, 2, 3, 4, 5, 6},
                          "output");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  model_builder.FinishModel({0, 1}, {2});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a constant",
            model_builder.GetErrorString());
}
414 
// An op output that is also a subgraph input is invalid.
TEST(VerifyModel, OutputIsSubgraphInput) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  model_builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  // Output shouldn't be a subgraph input.
  model_builder.FinishModel({0, 1, 2}, {2});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a subgraph input",
            model_builder.GetErrorString());
}
430 
// An op output marked as a variable tensor is invalid.
TEST(VerifyModel, OutputIsAVariable) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  // Output shouldn't be a variable.
  model_builder.AddTensor({2, 3}, TensorType_INT32, {}, "output",
                          /*variable*/ true);
  model_builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  model_builder.FinishModel({0, 1}, {2});
  ASSERT_FALSE(model_builder.Verify());
  EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a variable",
            model_builder.GetErrorString());
}
446 
// kOptionalTensor (-1) in an op's input list is accepted by the verifier.
TEST(VerifyModel, OpWithOptionalTensor) {
  TfLiteFlatbufferModelBuilder model_builder({}, {"test"});
  model_builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6},
                          "input");
  model_builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  model_builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  model_builder.AddOperator({kOptionalTensor, 0, 1}, {2},
                            BuiltinOperator_CUSTOM, "test");
  model_builder.FinishModel({0, 1}, {2});
  ASSERT_TRUE(model_builder.Verify());
  EXPECT_EQ("", model_builder.GetErrorString());
}
461 
462 // TODO(yichengfan): make up malicious files to test with.
463 
464 }  // namespace tflite
465 
main(int argc,char ** argv)466 int main(int argc, char** argv) {
467   ::tflite::LogToStderr();
468   ::testing::InitGoogleTest(&argc, argv);
469   return RUN_ALL_TESTS();
470 }
471