/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/kernels/subgraph_test_util.h"

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include <random>
#include <string>
#include <vector>

#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"

namespace tflite {

// Forward declarations for op kernels.
namespace ops {
namespace custom {

TfLiteRegistration* Register_ASSIGN_VARIABLE();
TfLiteRegistration* Register_READ_VARIABLE();

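// A test-only custom op that emits a single random int32 value in [1, 32768].
// Registered below as RANDOM_INT and used by
// SubgraphBuilder::BuildAssignRandomValueToVariableSubgraph.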
namespace random_int {

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 0);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  TfLiteTensor* output = GetOutput(context, node, 0);
  TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
  outputSize->data[0] = 1;
  // TODO(jaesung): Make the output size configurable from an op input so the
  // op is generic.
  return context->ResizeTensor(context, output, outputSize);
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor& output = context->tensors[node->outputs->data[0]];

  std::random_device rd;
  std::uniform_int_distribution<int> dist(1, 32768);
  output.data.i32[0] = dist(rd);
  return kTfLiteOk;
}

}  // namespace random_int

TfLiteRegistration* Register_RANDOM_INT() {
  static TfLiteRegistration r = {nullptr, nullptr, random_int::Prepare,
                                 random_int::Eval};
  return &r;
}

}  // namespace custom
}  // namespace ops

namespace subgraph_test_util {

namespace {

void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
  ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 0,
                                                   nullptr, {}, false),
            kTfLiteOk);
}

}  // namespace

SubgraphBuilder::~SubgraphBuilder() {
  for (auto buffer : buffers_) {
    free(buffer);
  }
}

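// Build a subgraph with an add op. Helper function for testing.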
void SubgraphBuilder::BuildAddSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kTensorCount = 3;
  // kInput1(0) --> +---+
  //                |ADD| --> kOutput(2)
  // kInput2(1) --> +---+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLiteAddParams* params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
                                  params, add_reg, &node_index);
}

// Build a subgraph with a mul op. Helper function for testing.
void SubgraphBuilder::BuildMulSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kTensorCount = 3;
  // kInput1(0) --> +---+
  //                |MUL| --> kOutput(2)
  // kInput2(1) --> +---+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLiteMulParams* params =
      reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
  params->activation = kTfLiteActNone;
  auto* mul_reg = ops::builtin::Register_MUL();
  mul_reg->builtin_code = kTfLiteBuiltinMul;
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
                                  params, mul_reg, &node_index);
}

// Build a subgraph with a pad op. Helper function for testing.
void SubgraphBuilder::BuildPadSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kTensorCount = 3;
  // kInput1(0) --> +---+
  //                |PAD| --> kOutput(2)
  // kInput2(1) --> +---+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLitePadParams* params =
      reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams)));
  auto* pad_reg = ops::builtin::Register_PAD();
  pad_reg->builtin_code = kTfLiteBuiltinPad;
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput}, {}, nullptr, 0,
                                  params, pad_reg, &node_index);
}

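// Build a subgraph with an if op that dispatches to subgraph 1 (then branch)
// or subgraph 2 (else branch). Helper function for testing.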
void SubgraphBuilder::BuildIfSubgraph(Subgraph* subgraph) {
  const int kCondInput = 0;
  const int kInput1 = 1;
  const int kInput2 = 2;
  const int kOutput = 3;
  const int kTensorCount = 4;

  // kCondInput(0) --> +----+
  // kInput1(1) -----> | IF | --> kOutput(3)
  // kInput2(2) -----> +----+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kCondInput, kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kCondInput, kTfLiteBool);
  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteInt32);

  TfLiteIfParams* params =
      reinterpret_cast<TfLiteIfParams*>(malloc(sizeof(TfLiteIfParams)));
  params->then_subgraph_index = 1;
  params->else_subgraph_index = 2;
  auto* if_reg = ops::builtin::Register_IF();
  if_reg->builtin_code = kTfLiteBuiltinIf;

  int node_index;
  subgraph->AddNodeWithParameters({kCondInput, kInput1, kInput2}, {kOutput}, {},
                                  nullptr, 0, params, if_reg, &node_index);
}

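// Build a condition subgraph that computes `input1 <= rhs`; the second input
// is ignored. Helper function for testing.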
void SubgraphBuilder::BuildLessEqualCondSubgraph(Subgraph* subgraph, int rhs) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput = 2;
  const int kConstRhs = 3;
  const int kTensorCount = 4;

  // kInput1(0) ----> +------------+
  //                  | LESS_EQUAL | --> kOutput(2)
  // kConstRhs(3) --> +------------+
  //
  // kInput2(1) --> (unused)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteBool);

  auto* le_reg = ops::builtin::Register_LESS_EQUAL();
  le_reg->builtin_code = kTfLiteBuiltinLessEqual;

  CreateConstantInt32Tensor(subgraph, kConstRhs, {1}, {rhs});
  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kConstRhs}, {kOutput}, {}, nullptr,
                                  0, nullptr, le_reg, &node_index);
}

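// Build a loop body subgraph that increments the counter by one and adds the
// new counter value to the accumulated value. Helper function for testing.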
void SubgraphBuilder::BuildAccumulateLoopBodySubgraph(Subgraph* subgraph) {
  const int kInputCounter = 0;
  const int kInputValue = 1;
  const int kOutputCounter = 2;
  const int kOutputValue = 3;
  const int kConstStep = 4;
  const int kTensorCount = 5;

  // kInputCounter(0) --> +-----+
  //                      | ADD | --> kOutputCounter(2)
  // kConstStep(4) -----> +-----+        |
  //                                     |
  //                                     v
  //                                  +-----+
  //                                  | ADD | --> kOutputValue(3)
  // kInputValue(1) ----------------> +-----+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);

  SetupTensor(subgraph, kInputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kInputValue, kTfLiteInt32);
  SetupTensor(subgraph, kOutputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kOutputValue, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstStep, {1}, {1});

  int node_index;
  TfLiteAddParams* params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  params->pot_scale_int16 = false;
  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;
  subgraph->AddNodeWithParameters({kInputCounter, kConstStep}, {kOutputCounter},
                                  {}, nullptr, 0, params, add_reg, &node_index);
  params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  params->activation = kTfLiteActNone;
  params->pot_scale_int16 = false;
  subgraph->AddNodeWithParameters({kOutputCounter, kInputValue}, {kOutputValue},
                                  {}, nullptr, 0, params, add_reg, &node_index);
}

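// Build a loop body subgraph that increments the counter by one and pads the
// value tensor with the given `padding`. Helper function for testing.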
void SubgraphBuilder::BuildPadLoopBodySubgraph(Subgraph* subgraph,
                                               const std::vector<int> padding) {
  const int kInputCounter = 0;
  const int kInputValue = 1;
  const int kOutputCounter = 2;
  const int kOutputValue = 3;
  const int kConstStep = 4;
  const int kConstPadding = 5;
  const int kTensorCount = 6;

  // kInputCounter(0) --> +-----+
  //                      | ADD | --> kOutputCounter(2)
  // kConstStep(4) -----> +-----+
  //
  // kInputValue(1) ----> +-----+
  //                      | PAD | --> kOutputValue(3)
  // kConstPadding(5) --> +-----+

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInputCounter, kInputValue}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutputCounter, kOutputValue}), kTfLiteOk);

  SetupTensor(subgraph, kInputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kInputValue, kTfLiteInt32);
  SetupTensor(subgraph, kOutputCounter, kTfLiteInt32);
  SetupTensor(subgraph, kOutputValue, kTfLiteInt32);

  CreateConstantInt32Tensor(subgraph, kConstStep, {1}, {1});
  ASSERT_EQ(padding.size() % 2, 0);
  int padding_dims = padding.size();
  CreateConstantInt32Tensor(subgraph, kConstPadding, {1, padding_dims},
                            padding);

  int node_index;
  TfLiteAddParams* add_params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  add_params->activation = kTfLiteActNone;
  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;
  subgraph->AddNodeWithParameters({kInputCounter, kConstStep}, {kOutputCounter},
                                  {}, nullptr, 0, add_params, add_reg,
                                  &node_index);
  TfLitePadParams* pad_params =
      reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams)));
  auto* pad_reg = ops::builtin::Register_PAD();
  pad_reg->builtin_code = kTfLiteBuiltinPad;
  subgraph->AddNodeWithParameters({kInputValue, kConstPadding}, {kOutputValue},
                                  {}, nullptr, 0, pad_params, pad_reg,
                                  &node_index);
}

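// Build a subgraph with a single while op, using subgraph 1 as the condition
// and subgraph 2 as the body. Helper function for testing.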
void SubgraphBuilder::BuildWhileSubgraph(Subgraph* subgraph) {
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kOutput1 = 2;
  const int kOutput2 = 3;
  const int kTensorCount = 4;

  // kInput1(0) --> +-------+ --> kOutput1(2)
  //                | WHILE |
  // kInput2(1) --> +-------+ --> kOutput2(3)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput1, kOutput2}), kTfLiteOk);

  SetupTensor(subgraph, kInput1, kTfLiteInt32);
  SetupTensor(subgraph, kInput2, kTfLiteInt32);
  SetupTensor(subgraph, kOutput1, kTfLiteInt32);
  SetupTensor(subgraph, kOutput2, kTfLiteInt32);

  TfLiteWhileParams* params =
      reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
  params->cond_subgraph_index = 1;
  params->body_subgraph_index = 2;
  auto* while_reg = ops::builtin::Register_WHILE();
  while_reg->builtin_code = kTfLiteBuiltinWhile;

  int node_index;
  subgraph->AddNodeWithParameters({kInput1, kInput2}, {kOutput1, kOutput2}, {},
                                  nullptr, 0, params, while_reg, &node_index);
}

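// Build a subgraph that assigns a random int32 value to the variable with
// resource id 1024. Helper function for testing.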
void SubgraphBuilder::BuildAssignRandomValueToVariableSubgraph(
    Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kRandomValue = 1;
  const int kTensorCount = 3;

  // Construct a graph like this:
  //   %1 = random_int()
  //   variable_assign(%0, %1)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({}), kTfLiteOk);

  SetupTensor(subgraph, kRandomValue, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});

  int node_index;
  subgraph->AddNodeWithParameters({}, {kRandomValue}, {}, nullptr, 0, nullptr,
                                  ::tflite::ops::custom::Register_RANDOM_INT(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId, kRandomValue}, {}, {}, nullptr, 0, nullptr,
      ::tflite::ops::custom::Register_ASSIGN_VARIABLE(), &node_index);
}

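// Build a subgraph that runs the initialization subgraph 1 through a call_once
// op and then reads the variable with resource id 1024. Helper function for
// testing.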
void SubgraphBuilder::BuildCallOnceAndReadVariableSubgraph(Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kOutput = 1;
  const int kTensorCount = 2;

  // Construct a graph like this:
  //   Output: %1
  //   %1 = read_variable(%0)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kOutput, kTfLiteInt32);
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});

  TfLiteCallOnceParams* params = reinterpret_cast<TfLiteCallOnceParams*>(
      malloc(sizeof(TfLiteCallOnceParams)));
  params->init_subgraph_index = 1;

  int node_index;
  subgraph->AddNodeWithParameters({}, {}, {}, nullptr, 0, params,
                                  ::tflite::ops::builtin::Register_CALL_ONCE(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId}, {kOutput}, {}, nullptr, 0, nullptr,
      ::tflite::ops::custom::Register_READ_VARIABLE(), &node_index);
}

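// Build a condition subgraph that computes `integer_input <= rhs`; the two
// string inputs are ignored. Helper function for testing.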
void SubgraphBuilder::BuildLessEqualCondSubgraphWithDynamicTensor(
    Subgraph* subgraph, int rhs) {
  const int kStringInput1 = 0;
  const int kStringInput2 = 1;
  const int kIntegerInput = 2;
  const int kOutput = 3;
  const int kConstRhs = 4;
  const int kTensorCount = 5;

  // kIntegerInput(2) --> +------------+
  //                      | LESS_EQUAL | --> kOutput(3)
  //     kConstRhs(4) --> +------------+
  //
  // kStringInput1(0) --> (unused)
  // kStringInput2(1) --> (unused)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kStringInput1, kTfLiteString);
  SetupTensor(subgraph, kStringInput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
  SetupTensor(subgraph, kOutput, kTfLiteBool);

  auto* le_reg = ops::builtin::Register_LESS_EQUAL();
  le_reg->builtin_code = kTfLiteBuiltinLessEqual;

  CreateConstantInt32Tensor(subgraph, kConstRhs, {1}, {rhs});
  int node_index;
  subgraph->AddNodeWithParameters({kIntegerInput, kConstRhs}, {kOutput}, {},
                                  nullptr, 0, nullptr, le_reg, &node_index);
}

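// Build a loop body subgraph with a dynamically sized string output: an add op
// increments the integer counter and a fill op produces a string tensor whose
// size depends on the new counter value. Helper function for testing.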
void SubgraphBuilder::BuildBodySubgraphWithDynamicTensor(Subgraph* subgraph) {
  const int kStringInput1 = 0;
  const int kStringInput2 = 1;
  const int kIntegerInput = 2;
  const int kStringOutput1 = 0;  // Forwards the `kStringInput1` tensor.
  const int kStringOutput2 = 4;
  const int kIntegerOutput = 5;
  const int kConst = 6;
  const int kTensorCount = 7;

  // Construct a graph like this:
  //   %5 = tf.Add(%2, 1)
  //   %4 = tf.Fill(%0, %5)
  //   yield(%0, %4, %5)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
            kTfLiteOk);
  ASSERT_EQ(
      subgraph->SetOutputs({kStringOutput1, kStringOutput2, kIntegerOutput}),
      kTfLiteOk);

  SetupTensor(subgraph, kStringInput1, kTfLiteString);
  SetupTensor(subgraph, kStringInput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
  SetupTensor(subgraph, kStringOutput1, kTfLiteString);
  SetupTensor(subgraph, kStringOutput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);
  SetupTensor(subgraph, kConst, kTfLiteInt32);

  TfLiteAddParams* add_params =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  add_params->activation = kTfLiteActNone;

  auto* add_reg = ops::builtin::Register_ADD();
  add_reg->builtin_code = kTfLiteBuiltinAdd;

  CreateConstantInt32Tensor(subgraph, kConst, {1}, {1});
  int node_index;
  subgraph->AddNodeWithParameters({kIntegerInput, kConst}, {kIntegerOutput}, {},
                                  nullptr, 0, add_params, add_reg, &node_index);

  auto* fill_reg = ops::builtin::Register_FILL();
  fill_reg->builtin_code = kTfLiteBuiltinFill;
  subgraph->AddNodeWithParameters({kIntegerOutput, kStringInput1},
                                  {kStringOutput2}, {}, nullptr, 0, nullptr,
                                  fill_reg, &node_index);
}

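// Build a subgraph with a single while op over two string tensors and one
// integer tensor, using subgraph 1 as the condition and subgraph 2 as the
// body. Helper function for testing.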
void SubgraphBuilder::BuildWhileSubgraphWithDynamicTensor(Subgraph* subgraph) {
  const int kStringInput1 = 0;
  const int kStringInput2 = 1;
  const int kIntegerInput = 2;
  const int kStringOutput1 = 3;
  const int kStringOutput2 = 4;
  const int kIntegerOutput = 5;
  const int kTensorCount = 6;

  // Create a while op with two string tensors and one integer tensor.
  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(subgraph->SetInputs({kStringInput1, kStringInput2, kIntegerInput}),
            kTfLiteOk);
  ASSERT_EQ(
      subgraph->SetOutputs({kStringOutput1, kStringOutput2, kIntegerOutput}),
      kTfLiteOk);

  SetupTensor(subgraph, kStringInput1, kTfLiteString);
  SetupTensor(subgraph, kStringInput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerInput, kTfLiteInt32);
  SetupTensor(subgraph, kStringOutput1, kTfLiteString);
  SetupTensor(subgraph, kStringOutput2, kTfLiteString);
  SetupTensor(subgraph, kIntegerOutput, kTfLiteInt32);

  TfLiteWhileParams* params =
      reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
  params->cond_subgraph_index = 1;
  params->body_subgraph_index = 2;
  auto* while_reg = ops::builtin::Register_WHILE();
  while_reg->builtin_code = kTfLiteBuiltinWhile;

  int node_index;
  subgraph->AddNodeWithParameters(
      {kStringInput1, kStringInput2, kIntegerInput},
      {kStringOutput1, kStringOutput2, kIntegerOutput}, {}, nullptr, 0, params,
      while_reg, &node_index);
}

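// Set up `tensor_index` as a read-only int32 tensor with the given shape and
// data. The backing buffer is owned by this SubgraphBuilder and released in
// its destructor.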
void SubgraphBuilder::CreateConstantInt32Tensor(Subgraph* subgraph,
                                                int tensor_index,
                                                const std::vector<int>& shape,
                                                const std::vector<int>& data) {
  ASSERT_GT(shape.size(), 0);
  int num_elements = 1;
  for (int dim : shape) {
    num_elements *= dim;
  }
  ASSERT_EQ(data.size(), num_elements);
  size_t size_in_bytes = sizeof(int32_t) * num_elements;
  // malloc returns storage that is suitably aligned for int32_t.
  int32_t* buffer = reinterpret_cast<int32_t*>(malloc(size_in_bytes));
  for (int i = 0; i < num_elements; ++i) {
    buffer[i] = data[i];
  }
  buffers_.push_back(buffer);
  ASSERT_EQ(subgraph->SetTensorParametersReadOnly(
                tensor_index, kTfLiteInt32, "", shape, {},
                reinterpret_cast<const char*>(buffer), size_in_bytes),
            kTfLiteOk);
}

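// Fill an int32 tensor with the given data. The tensor must already hold
// exactly data.size() elements.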
void FillIntTensor(TfLiteTensor* tensor, const std::vector<int32_t>& data) {
  int count = NumElements(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    tensor->data.i32[i] = data[i];
  }
}

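// Write `data` into `tensor` as a scalar (rank-0) string tensor.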
void FillScalarStringTensor(TfLiteTensor* tensor, const std::string& data) {
  StringRef str_ref;
  str_ref.str = data.c_str();
  str_ref.len = data.size();
  DynamicBuffer buf;
  buf.AddString(str_ref);
  buf.WriteToTensor(tensor, /*new_shape=*/TfLiteIntArrayCreate(0));
}

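// Check that `tensor` is a scalar string tensor holding exactly `data`.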
void CheckScalarStringTensor(const TfLiteTensor* tensor,
                             const std::string& data) {
  ASSERT_EQ(tensor->dims->size, 0);
  ASSERT_EQ(tensor->type, kTfLiteString);
  StringRef str_ref = GetString(tensor, 0);
  EXPECT_EQ(std::string(str_ref.str, str_ref.len), data);
}

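// Check that `tensor` is a string tensor with the given shape and contents.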
void CheckStringTensor(const TfLiteTensor* tensor,
                       const std::vector<int>& shape,
                       const std::vector<std::string>& data) {
  ASSERT_EQ(tensor->dims->size, shape.size());
  for (int i = 0; i < tensor->dims->size; ++i) {
    ASSERT_EQ(tensor->dims->data[i], shape[i]);
  }
  ASSERT_EQ(tensor->type, kTfLiteString);
  int count = GetStringCount(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    StringRef str_ref = GetString(tensor, i);
    EXPECT_EQ(std::string(str_ref.str, str_ref.len), data[i]);
  }
}

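// Check that `tensor` is an int32 tensor with the given shape and contents.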
void CheckIntTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
                    const std::vector<int32_t>& data) {
  ASSERT_EQ(tensor->dims->size, shape.size());
  for (int i = 0; i < tensor->dims->size; ++i) {
    ASSERT_EQ(tensor->dims->data[i], shape[i]);
  }
  ASSERT_EQ(tensor->type, kTfLiteInt32);
  int count = NumElements(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    EXPECT_EQ(tensor->data.i32[i], data[i]);
  }
}

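// Check that `tensor` is a bool tensor with the given shape and contents.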
void CheckBoolTensor(const TfLiteTensor* tensor, const std::vector<int>& shape,
                     const std::vector<bool>& data) {
  ASSERT_EQ(tensor->dims->size, shape.size());
  for (int i = 0; i < tensor->dims->size; ++i) {
    ASSERT_EQ(tensor->dims->data[i], shape[i]);
  }
  ASSERT_EQ(tensor->type, kTfLiteBool);
  int count = NumElements(tensor);
  ASSERT_EQ(count, data.size());
  for (int i = 0; i < count; ++i) {
    EXPECT_EQ(tensor->data.b[i], data[i]);
  }
}

}  // namespace subgraph_test_util
}  // namespace tflite