1 // clang-format off
2 // Generated file (from: gather.mod.py). Do not edit
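// Overview: each CreateModel* function below builds the same single-operation
// ANEURALNETWORKS_GATHER graph (input tensor, constant scalar axis, constant
// index tensor) in a different configuration: float32, relaxed fp32->fp16
// execution, TENSOR_QUANT8_ASYMM, TENSOR_INT32, and TENSOR_FLOAT16 operands,
// plus *_dynamic_output_shape variants whose output dimensions are 0 (unknown,
// to be resolved at execution time). Each paired is_ignored* helper reports
// whether output index i is presumably skipped when comparing results; all
// ignore sets in this file are empty.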
3 void CreateModel(Model *model) {
4 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
5 OperandType type1(Type::INT32, {});
6 OperandType type2(Type::TENSOR_INT32, {2});
7 // Phase 1, operands
8 auto input0 = model->addOperand(&type0);
9 auto param = model->addOperand(&type1);
10 auto param1 = model->addOperand(&type2);
11 auto output0 = model->addOperand(&type0);
12 // Phase 2, operations
13 static int32_t param_init[] = {0};
14 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
15 static int32_t param1_init[] = {1, 0};
16 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
17 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
18 // Phase 3, inputs and outputs
19 model->identifyInputsAndOutputs(
20 {input0},
21 {output0});
22 assert(model->isValid());
23 }
24
25 inline bool is_ignored(int i) {
26 static std::set<int> ignore = {};
27 return ignore.find(i) != ignore.end();
28 }
29
30 void CreateModel_relaxed(Model *model) {
31 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
32 OperandType type1(Type::INT32, {});
33 OperandType type2(Type::TENSOR_INT32, {2});
34 // Phase 1, operands
35 auto input0 = model->addOperand(&type0);
36 auto param = model->addOperand(&type1);
37 auto param1 = model->addOperand(&type2);
38 auto output0 = model->addOperand(&type0);
39 // Phase 2, operations
40 static int32_t param_init[] = {0};
41 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
42 static int32_t param1_init[] = {1, 0};
43 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
44 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
45 // Phase 3, inputs and outputs
46 model->identifyInputsAndOutputs(
47 {input0},
48 {output0});
49 // Phase 4: set relaxed execution
50 model->relaxComputationFloat32toFloat16(true);
51 assert(model->isValid());
52 }
53
54 inline bool is_ignored_relaxed(int i) {
55 static std::set<int> ignore = {};
56 return ignore.find(i) != ignore.end();
57 }
58
59 void CreateModel_quant8(Model *model) {
60 OperandType type1(Type::INT32, {});
61 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
62 OperandType type2(Type::TENSOR_INT32, {2});
63 // Phase 1, operands
64 auto input0 = model->addOperand(&type13);
65 auto param = model->addOperand(&type1);
66 auto param1 = model->addOperand(&type2);
67 auto output0 = model->addOperand(&type13);
68 // Phase 2, operations
69 static int32_t param_init[] = {0};
70 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
71 static int32_t param1_init[] = {1, 0};
72 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
73 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
74 // Phase 3, inputs and outputs
75 model->identifyInputsAndOutputs(
76 {input0},
77 {output0});
78 assert(model->isValid());
79 }
80
81 inline bool is_ignored_quant8(int i) {
82 static std::set<int> ignore = {};
83 return ignore.find(i) != ignore.end();
84 }
85
86 void CreateModel_int32(Model *model) {
87 OperandType type1(Type::INT32, {});
88 OperandType type14(Type::TENSOR_INT32, {2, 2});
89 OperandType type2(Type::TENSOR_INT32, {2});
90 // Phase 1, operands
91 auto input0 = model->addOperand(&type14);
92 auto param = model->addOperand(&type1);
93 auto param1 = model->addOperand(&type2);
94 auto output0 = model->addOperand(&type14);
95 // Phase 2, operations
96 static int32_t param_init[] = {0};
97 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
98 static int32_t param1_init[] = {1, 0};
99 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
100 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
101 // Phase 3, inputs and outputs
102 model->identifyInputsAndOutputs(
103 {input0},
104 {output0});
105 assert(model->isValid());
106 }
107
108 inline bool is_ignored_int32(int i) {
109 static std::set<int> ignore = {};
110 return ignore.find(i) != ignore.end();
111 }
112
113 void CreateModel_float16(Model *model) {
114 OperandType type1(Type::INT32, {});
115 OperandType type15(Type::TENSOR_FLOAT16, {2, 2});
116 OperandType type2(Type::TENSOR_INT32, {2});
117 // Phase 1, operands
118 auto input0 = model->addOperand(&type15);
119 auto param = model->addOperand(&type1);
120 auto param1 = model->addOperand(&type2);
121 auto output0 = model->addOperand(&type15);
122 // Phase 2, operations
123 static int32_t param_init[] = {0};
124 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
125 static int32_t param1_init[] = {1, 0};
126 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
127 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
128 // Phase 3, inputs and outputs
129 model->identifyInputsAndOutputs(
130 {input0},
131 {output0});
132 assert(model->isValid());
133 }
134
135 inline bool is_ignored_float16(int i) {
136 static std::set<int> ignore = {};
137 return ignore.find(i) != ignore.end();
138 }
139
140 void CreateModel_dynamic_output_shape(Model *model) {
141 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
142 OperandType type1(Type::INT32, {});
143 OperandType type16(Type::TENSOR_FLOAT32, {0, 0});
144 OperandType type2(Type::TENSOR_INT32, {2});
145 // Phase 1, operands
146 auto input0 = model->addOperand(&type0);
147 auto param = model->addOperand(&type1);
148 auto param1 = model->addOperand(&type2);
149 auto output0 = model->addOperand(&type16);
150 // Phase 2, operations
151 static int32_t param_init[] = {0};
152 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
153 static int32_t param1_init[] = {1, 0};
154 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
155 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
156 // Phase 3, inputs and outputs
157 model->identifyInputsAndOutputs(
158 {input0},
159 {output0});
160 assert(model->isValid());
161 }
162
163 inline bool is_ignored_dynamic_output_shape(int i) {
164 static std::set<int> ignore = {};
165 return ignore.find(i) != ignore.end();
166 }
167
168 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
169 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
170 OperandType type1(Type::INT32, {});
171 OperandType type16(Type::TENSOR_FLOAT32, {0, 0});
172 OperandType type2(Type::TENSOR_INT32, {2});
173 // Phase 1, operands
174 auto input0 = model->addOperand(&type0);
175 auto param = model->addOperand(&type1);
176 auto param1 = model->addOperand(&type2);
177 auto output0 = model->addOperand(&type16);
178 // Phase 2, operations
179 static int32_t param_init[] = {0};
180 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
181 static int32_t param1_init[] = {1, 0};
182 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
183 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
184 // Phase 3, inputs and outputs
185 model->identifyInputsAndOutputs(
186 {input0},
187 {output0});
188 // Phase 4: set relaxed execution
189 model->relaxComputationFloat32toFloat16(true);
190 assert(model->isValid());
191 }
192
193 inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
194 static std::set<int> ignore = {};
195 return ignore.find(i) != ignore.end();
196 }
197
198 void CreateModel_dynamic_output_shape_quant8(Model *model) {
199 OperandType type1(Type::INT32, {});
200 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
201 OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.5f, 127);
202 OperandType type2(Type::TENSOR_INT32, {2});
203 // Phase 1, operands
204 auto input0 = model->addOperand(&type13);
205 auto param = model->addOperand(&type1);
206 auto param1 = model->addOperand(&type2);
207 auto output0 = model->addOperand(&type17);
208 // Phase 2, operations
209 static int32_t param_init[] = {0};
210 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
211 static int32_t param1_init[] = {1, 0};
212 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
213 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
214 // Phase 3, inputs and outputs
215 model->identifyInputsAndOutputs(
216 {input0},
217 {output0});
218 assert(model->isValid());
219 }
220
221 inline bool is_ignored_dynamic_output_shape_quant8(int i) {
222 static std::set<int> ignore = {};
223 return ignore.find(i) != ignore.end();
224 }
225
226 void CreateModel_dynamic_output_shape_int32(Model *model) {
227 OperandType type1(Type::INT32, {});
228 OperandType type14(Type::TENSOR_INT32, {2, 2});
229 OperandType type18(Type::TENSOR_INT32, {0, 0});
230 OperandType type2(Type::TENSOR_INT32, {2});
231 // Phase 1, operands
232 auto input0 = model->addOperand(&type14);
233 auto param = model->addOperand(&type1);
234 auto param1 = model->addOperand(&type2);
235 auto output0 = model->addOperand(&type18);
236 // Phase 2, operations
237 static int32_t param_init[] = {0};
238 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
239 static int32_t param1_init[] = {1, 0};
240 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
241 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
242 // Phase 3, inputs and outputs
243 model->identifyInputsAndOutputs(
244 {input0},
245 {output0});
246 assert(model->isValid());
247 }
248
249 inline bool is_ignored_dynamic_output_shape_int32(int i) {
250 static std::set<int> ignore = {};
251 return ignore.find(i) != ignore.end();
252 }
253
254 void CreateModel_dynamic_output_shape_float16(Model *model) {
255 OperandType type1(Type::INT32, {});
256 OperandType type15(Type::TENSOR_FLOAT16, {2, 2});
257 OperandType type19(Type::TENSOR_FLOAT16, {0, 0});
258 OperandType type2(Type::TENSOR_INT32, {2});
259 // Phase 1, operands
260 auto input0 = model->addOperand(&type15);
261 auto param = model->addOperand(&type1);
262 auto param1 = model->addOperand(&type2);
263 auto output0 = model->addOperand(&type19);
264 // Phase 2, operations
265 static int32_t param_init[] = {0};
266 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
267 static int32_t param1_init[] = {1, 0};
268 model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
269 model->addOperation(ANEURALNETWORKS_GATHER, {input0, param, param1}, {output0});
270 // Phase 3, inputs and outputs
271 model->identifyInputsAndOutputs(
272 {input0},
273 {output0});
274 assert(model->isValid());
275 }
276
277 inline bool is_ignored_dynamic_output_shape_float16(int i) {
278 static std::set<int> ignore = {};
279 return ignore.find(i) != ignore.end();
280 }
281
282 void CreateModel_2(Model *model) {
283 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
284 OperandType type1(Type::INT32, {});
285 OperandType type3(Type::TENSOR_FLOAT32, {1, 2});
286 OperandType type4(Type::TENSOR_INT32, {1});
287 // Phase 1, operands
288 auto input01 = model->addOperand(&type0);
289 auto param2 = model->addOperand(&type1);
290 auto param3 = model->addOperand(&type4);
291 auto output01 = model->addOperand(&type3);
292 // Phase 2, operations
293 static int32_t param2_init[] = {0};
294 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
295 static int32_t param3_init[] = {1};
296 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
297 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
298 // Phase 3, inputs and outputs
299 model->identifyInputsAndOutputs(
300 {input01},
301 {output01});
302 assert(model->isValid());
303 }
304
305 inline bool is_ignored_2(int i) {
306 static std::set<int> ignore = {};
307 return ignore.find(i) != ignore.end();
308 }
309
310 void CreateModel_relaxed_2(Model *model) {
311 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
312 OperandType type1(Type::INT32, {});
313 OperandType type3(Type::TENSOR_FLOAT32, {1, 2});
314 OperandType type4(Type::TENSOR_INT32, {1});
315 // Phase 1, operands
316 auto input01 = model->addOperand(&type0);
317 auto param2 = model->addOperand(&type1);
318 auto param3 = model->addOperand(&type4);
319 auto output01 = model->addOperand(&type3);
320 // Phase 2, operations
321 static int32_t param2_init[] = {0};
322 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
323 static int32_t param3_init[] = {1};
324 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
325 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
326 // Phase 3, inputs and outputs
327 model->identifyInputsAndOutputs(
328 {input01},
329 {output01});
330 // Phase 4: set relaxed execution
331 model->relaxComputationFloat32toFloat16(true);
332 assert(model->isValid());
333 }
334
335 inline bool is_ignored_relaxed_2(int i) {
336 static std::set<int> ignore = {};
337 return ignore.find(i) != ignore.end();
338 }
339
340 void CreateModel_quant8_2(Model *model) {
341 OperandType type1(Type::INT32, {});
342 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
343 OperandType type20(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.5f, 127);
344 OperandType type4(Type::TENSOR_INT32, {1});
345 // Phase 1, operands
346 auto input01 = model->addOperand(&type13);
347 auto param2 = model->addOperand(&type1);
348 auto param3 = model->addOperand(&type4);
349 auto output01 = model->addOperand(&type20);
350 // Phase 2, operations
351 static int32_t param2_init[] = {0};
352 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
353 static int32_t param3_init[] = {1};
354 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
355 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
356 // Phase 3, inputs and outputs
357 model->identifyInputsAndOutputs(
358 {input01},
359 {output01});
360 assert(model->isValid());
361 }
362
363 inline bool is_ignored_quant8_2(int i) {
364 static std::set<int> ignore = {};
365 return ignore.find(i) != ignore.end();
366 }
367
368 void CreateModel_int32_2(Model *model) {
369 OperandType type1(Type::INT32, {});
370 OperandType type14(Type::TENSOR_INT32, {2, 2});
371 OperandType type21(Type::TENSOR_INT32, {1, 2});
372 OperandType type4(Type::TENSOR_INT32, {1});
373 // Phase 1, operands
374 auto input01 = model->addOperand(&type14);
375 auto param2 = model->addOperand(&type1);
376 auto param3 = model->addOperand(&type4);
377 auto output01 = model->addOperand(&type21);
378 // Phase 2, operations
379 static int32_t param2_init[] = {0};
380 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
381 static int32_t param3_init[] = {1};
382 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
383 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
384 // Phase 3, inputs and outputs
385 model->identifyInputsAndOutputs(
386 {input01},
387 {output01});
388 assert(model->isValid());
389 }
390
391 inline bool is_ignored_int32_2(int i) {
392 static std::set<int> ignore = {};
393 return ignore.find(i) != ignore.end();
394 }
395
396 void CreateModel_float16_2(Model *model) {
397 OperandType type1(Type::INT32, {});
398 OperandType type15(Type::TENSOR_FLOAT16, {2, 2});
399 OperandType type22(Type::TENSOR_FLOAT16, {1, 2});
400 OperandType type4(Type::TENSOR_INT32, {1});
401 // Phase 1, operands
402 auto input01 = model->addOperand(&type15);
403 auto param2 = model->addOperand(&type1);
404 auto param3 = model->addOperand(&type4);
405 auto output01 = model->addOperand(&type22);
406 // Phase 2, operations
407 static int32_t param2_init[] = {0};
408 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
409 static int32_t param3_init[] = {1};
410 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
411 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
412 // Phase 3, inputs and outputs
413 model->identifyInputsAndOutputs(
414 {input01},
415 {output01});
416 assert(model->isValid());
417 }
418
419 inline bool is_ignored_float16_2(int i) {
420 static std::set<int> ignore = {};
421 return ignore.find(i) != ignore.end();
422 }
423
424 void CreateModel_dynamic_output_shape_2(Model *model) {
425 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
426 OperandType type1(Type::INT32, {});
427 OperandType type16(Type::TENSOR_FLOAT32, {0, 0});
428 OperandType type4(Type::TENSOR_INT32, {1});
429 // Phase 1, operands
430 auto input01 = model->addOperand(&type0);
431 auto param2 = model->addOperand(&type1);
432 auto param3 = model->addOperand(&type4);
433 auto output01 = model->addOperand(&type16);
434 // Phase 2, operations
435 static int32_t param2_init[] = {0};
436 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
437 static int32_t param3_init[] = {1};
438 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
439 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
440 // Phase 3, inputs and outputs
441 model->identifyInputsAndOutputs(
442 {input01},
443 {output01});
444 assert(model->isValid());
445 }
446
447 inline bool is_ignored_dynamic_output_shape_2(int i) {
448 static std::set<int> ignore = {};
449 return ignore.find(i) != ignore.end();
450 }
451
452 void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
453 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
454 OperandType type1(Type::INT32, {});
455 OperandType type16(Type::TENSOR_FLOAT32, {0, 0});
456 OperandType type4(Type::TENSOR_INT32, {1});
457 // Phase 1, operands
458 auto input01 = model->addOperand(&type0);
459 auto param2 = model->addOperand(&type1);
460 auto param3 = model->addOperand(&type4);
461 auto output01 = model->addOperand(&type16);
462 // Phase 2, operations
463 static int32_t param2_init[] = {0};
464 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
465 static int32_t param3_init[] = {1};
466 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
467 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
468 // Phase 3, inputs and outputs
469 model->identifyInputsAndOutputs(
470 {input01},
471 {output01});
472 // Phase 4: set relaxed execution
473 model->relaxComputationFloat32toFloat16(true);
474 assert(model->isValid());
475 }
476
477 inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
478 static std::set<int> ignore = {};
479 return ignore.find(i) != ignore.end();
480 }
481
482 void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
483 OperandType type1(Type::INT32, {});
484 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
485 OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.5f, 127);
486 OperandType type4(Type::TENSOR_INT32, {1});
487 // Phase 1, operands
488 auto input01 = model->addOperand(&type13);
489 auto param2 = model->addOperand(&type1);
490 auto param3 = model->addOperand(&type4);
491 auto output01 = model->addOperand(&type17);
492 // Phase 2, operations
493 static int32_t param2_init[] = {0};
494 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
495 static int32_t param3_init[] = {1};
496 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
497 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
498 // Phase 3, inputs and outputs
499 model->identifyInputsAndOutputs(
500 {input01},
501 {output01});
502 assert(model->isValid());
503 }
504
505 inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
506 static std::set<int> ignore = {};
507 return ignore.find(i) != ignore.end();
508 }
509
510 void CreateModel_dynamic_output_shape_int32_2(Model *model) {
511 OperandType type1(Type::INT32, {});
512 OperandType type14(Type::TENSOR_INT32, {2, 2});
513 OperandType type18(Type::TENSOR_INT32, {0, 0});
514 OperandType type4(Type::TENSOR_INT32, {1});
515 // Phase 1, operands
516 auto input01 = model->addOperand(&type14);
517 auto param2 = model->addOperand(&type1);
518 auto param3 = model->addOperand(&type4);
519 auto output01 = model->addOperand(&type18);
520 // Phase 2, operations
521 static int32_t param2_init[] = {0};
522 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
523 static int32_t param3_init[] = {1};
524 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
525 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
526 // Phase 3, inputs and outputs
527 model->identifyInputsAndOutputs(
528 {input01},
529 {output01});
530 assert(model->isValid());
531 }
532
533 inline bool is_ignored_dynamic_output_shape_int32_2(int i) {
534 static std::set<int> ignore = {};
535 return ignore.find(i) != ignore.end();
536 }
537
538 void CreateModel_dynamic_output_shape_float16_2(Model *model) {
539 OperandType type1(Type::INT32, {});
540 OperandType type15(Type::TENSOR_FLOAT16, {2, 2});
541 OperandType type19(Type::TENSOR_FLOAT16, {0, 0});
542 OperandType type4(Type::TENSOR_INT32, {1});
543 // Phase 1, operands
544 auto input01 = model->addOperand(&type15);
545 auto param2 = model->addOperand(&type1);
546 auto param3 = model->addOperand(&type4);
547 auto output01 = model->addOperand(&type19);
548 // Phase 2, operations
549 static int32_t param2_init[] = {0};
550 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
551 static int32_t param3_init[] = {1};
552 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
553 model->addOperation(ANEURALNETWORKS_GATHER, {input01, param2, param3}, {output01});
554 // Phase 3, inputs and outputs
555 model->identifyInputsAndOutputs(
556 {input01},
557 {output01});
558 assert(model->isValid());
559 }
560
561 inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
562 static std::set<int> ignore = {};
563 return ignore.find(i) != ignore.end();
564 }
565
566 void CreateModel_3(Model *model) {
567 OperandType type1(Type::INT32, {});
568 OperandType type4(Type::TENSOR_INT32, {1});
569 OperandType type5(Type::TENSOR_FLOAT32, {3});
570 OperandType type6(Type::TENSOR_FLOAT32, {1});
571 // Phase 1, operands
572 auto input02 = model->addOperand(&type5);
573 auto param4 = model->addOperand(&type1);
574 auto param5 = model->addOperand(&type4);
575 auto output02 = model->addOperand(&type6);
576 // Phase 2, operations
577 static int32_t param4_init[] = {0};
578 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
579 static int32_t param5_init[] = {1};
580 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
581 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
582 // Phase 3, inputs and outputs
583 model->identifyInputsAndOutputs(
584 {input02},
585 {output02});
586 assert(model->isValid());
587 }
588
589 inline bool is_ignored_3(int i) {
590 static std::set<int> ignore = {};
591 return ignore.find(i) != ignore.end();
592 }
593
594 void CreateModel_relaxed_3(Model *model) {
595 OperandType type1(Type::INT32, {});
596 OperandType type4(Type::TENSOR_INT32, {1});
597 OperandType type5(Type::TENSOR_FLOAT32, {3});
598 OperandType type6(Type::TENSOR_FLOAT32, {1});
599 // Phase 1, operands
600 auto input02 = model->addOperand(&type5);
601 auto param4 = model->addOperand(&type1);
602 auto param5 = model->addOperand(&type4);
603 auto output02 = model->addOperand(&type6);
604 // Phase 2, operations
605 static int32_t param4_init[] = {0};
606 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
607 static int32_t param5_init[] = {1};
608 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
609 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
610 // Phase 3, inputs and outputs
611 model->identifyInputsAndOutputs(
612 {input02},
613 {output02});
614 // Phase 4: set relaxed execution
615 model->relaxComputationFloat32toFloat16(true);
616 assert(model->isValid());
617 }
618
619 inline bool is_ignored_relaxed_3(int i) {
620 static std::set<int> ignore = {};
621 return ignore.find(i) != ignore.end();
622 }
623
624 void CreateModel_quant8_3(Model *model) {
625 OperandType type1(Type::INT32, {});
626 OperandType type23(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
627 OperandType type24(Type::TENSOR_QUANT8_ASYMM, {1}, 0.5f, 127);
628 OperandType type4(Type::TENSOR_INT32, {1});
629 // Phase 1, operands
630 auto input02 = model->addOperand(&type23);
631 auto param4 = model->addOperand(&type1);
632 auto param5 = model->addOperand(&type4);
633 auto output02 = model->addOperand(&type24);
634 // Phase 2, operations
635 static int32_t param4_init[] = {0};
636 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
637 static int32_t param5_init[] = {1};
638 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
639 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
640 // Phase 3, inputs and outputs
641 model->identifyInputsAndOutputs(
642 {input02},
643 {output02});
644 assert(model->isValid());
645 }
646
647 inline bool is_ignored_quant8_3(int i) {
648 static std::set<int> ignore = {};
649 return ignore.find(i) != ignore.end();
650 }
651
652 void CreateModel_int32_3(Model *model) {
653 OperandType type1(Type::INT32, {});
654 OperandType type25(Type::TENSOR_INT32, {3});
655 OperandType type4(Type::TENSOR_INT32, {1});
656 // Phase 1, operands
657 auto input02 = model->addOperand(&type25);
658 auto param4 = model->addOperand(&type1);
659 auto param5 = model->addOperand(&type4);
660 auto output02 = model->addOperand(&type4);
661 // Phase 2, operations
662 static int32_t param4_init[] = {0};
663 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
664 static int32_t param5_init[] = {1};
665 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
666 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
667 // Phase 3, inputs and outputs
668 model->identifyInputsAndOutputs(
669 {input02},
670 {output02});
671 assert(model->isValid());
672 }
673
674 inline bool is_ignored_int32_3(int i) {
675 static std::set<int> ignore = {};
676 return ignore.find(i) != ignore.end();
677 }
678
679 void CreateModel_float16_3(Model *model) {
680 OperandType type1(Type::INT32, {});
681 OperandType type26(Type::TENSOR_FLOAT16, {3});
682 OperandType type27(Type::TENSOR_FLOAT16, {1});
683 OperandType type4(Type::TENSOR_INT32, {1});
684 // Phase 1, operands
685 auto input02 = model->addOperand(&type26);
686 auto param4 = model->addOperand(&type1);
687 auto param5 = model->addOperand(&type4);
688 auto output02 = model->addOperand(&type27);
689 // Phase 2, operations
690 static int32_t param4_init[] = {0};
691 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
692 static int32_t param5_init[] = {1};
693 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
694 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
695 // Phase 3, inputs and outputs
696 model->identifyInputsAndOutputs(
697 {input02},
698 {output02});
699 assert(model->isValid());
700 }
701
702 inline bool is_ignored_float16_3(int i) {
703 static std::set<int> ignore = {};
704 return ignore.find(i) != ignore.end();
705 }
706
707 void CreateModel_dynamic_output_shape_3(Model *model) {
708 OperandType type1(Type::INT32, {});
709 OperandType type28(Type::TENSOR_FLOAT32, {0});
710 OperandType type4(Type::TENSOR_INT32, {1});
711 OperandType type5(Type::TENSOR_FLOAT32, {3});
712 // Phase 1, operands
713 auto input02 = model->addOperand(&type5);
714 auto param4 = model->addOperand(&type1);
715 auto param5 = model->addOperand(&type4);
716 auto output02 = model->addOperand(&type28);
717 // Phase 2, operations
718 static int32_t param4_init[] = {0};
719 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
720 static int32_t param5_init[] = {1};
721 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
722 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
723 // Phase 3, inputs and outputs
724 model->identifyInputsAndOutputs(
725 {input02},
726 {output02});
727 assert(model->isValid());
728 }
729
730 inline bool is_ignored_dynamic_output_shape_3(int i) {
731 static std::set<int> ignore = {};
732 return ignore.find(i) != ignore.end();
733 }
734
735 void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
736 OperandType type1(Type::INT32, {});
737 OperandType type28(Type::TENSOR_FLOAT32, {0});
738 OperandType type4(Type::TENSOR_INT32, {1});
739 OperandType type5(Type::TENSOR_FLOAT32, {3});
740 // Phase 1, operands
741 auto input02 = model->addOperand(&type5);
742 auto param4 = model->addOperand(&type1);
743 auto param5 = model->addOperand(&type4);
744 auto output02 = model->addOperand(&type28);
745 // Phase 2, operations
746 static int32_t param4_init[] = {0};
747 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
748 static int32_t param5_init[] = {1};
749 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
750 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
751 // Phase 3, inputs and outputs
752 model->identifyInputsAndOutputs(
753 {input02},
754 {output02});
755 // Phase 4: set relaxed execution
756 model->relaxComputationFloat32toFloat16(true);
757 assert(model->isValid());
758 }
759
760 inline bool is_ignored_dynamic_output_shape_relaxed_3(int i) {
761 static std::set<int> ignore = {};
762 return ignore.find(i) != ignore.end();
763 }
764
765 void CreateModel_dynamic_output_shape_quant8_3(Model *model) {
766 OperandType type1(Type::INT32, {});
767 OperandType type23(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
768 OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 127);
769 OperandType type4(Type::TENSOR_INT32, {1});
770 // Phase 1, operands
771 auto input02 = model->addOperand(&type23);
772 auto param4 = model->addOperand(&type1);
773 auto param5 = model->addOperand(&type4);
774 auto output02 = model->addOperand(&type29);
775 // Phase 2, operations
776 static int32_t param4_init[] = {0};
777 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
778 static int32_t param5_init[] = {1};
779 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
780 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
781 // Phase 3, inputs and outputs
782 model->identifyInputsAndOutputs(
783 {input02},
784 {output02});
785 assert(model->isValid());
786 }
787
788 inline bool is_ignored_dynamic_output_shape_quant8_3(int i) {
789 static std::set<int> ignore = {};
790 return ignore.find(i) != ignore.end();
791 }
792
793 void CreateModel_dynamic_output_shape_int32_3(Model *model) {
794 OperandType type1(Type::INT32, {});
795 OperandType type25(Type::TENSOR_INT32, {3});
796 OperandType type30(Type::TENSOR_INT32, {0});
797 OperandType type4(Type::TENSOR_INT32, {1});
798 // Phase 1, operands
799 auto input02 = model->addOperand(&type25);
800 auto param4 = model->addOperand(&type1);
801 auto param5 = model->addOperand(&type4);
802 auto output02 = model->addOperand(&type30);
803 // Phase 2, operations
804 static int32_t param4_init[] = {0};
805 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
806 static int32_t param5_init[] = {1};
807 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
808 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
809 // Phase 3, inputs and outputs
810 model->identifyInputsAndOutputs(
811 {input02},
812 {output02});
813 assert(model->isValid());
814 }
815
816 inline bool is_ignored_dynamic_output_shape_int32_3(int i) {
817 static std::set<int> ignore = {};
818 return ignore.find(i) != ignore.end();
819 }
820
821 void CreateModel_dynamic_output_shape_float16_3(Model *model) {
822 OperandType type1(Type::INT32, {});
823 OperandType type26(Type::TENSOR_FLOAT16, {3});
824 OperandType type31(Type::TENSOR_FLOAT16, {0});
825 OperandType type4(Type::TENSOR_INT32, {1});
826 // Phase 1, operands
827 auto input02 = model->addOperand(&type26);
828 auto param4 = model->addOperand(&type1);
829 auto param5 = model->addOperand(&type4);
830 auto output02 = model->addOperand(&type31);
831 // Phase 2, operations
832 static int32_t param4_init[] = {0};
833 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 1);
834 static int32_t param5_init[] = {1};
835 model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
836 model->addOperation(ANEURALNETWORKS_GATHER, {input02, param4, param5}, {output02});
837 // Phase 3, inputs and outputs
838 model->identifyInputsAndOutputs(
839 {input02},
840 {output02});
841 assert(model->isValid());
842 }
843
844 inline bool is_ignored_dynamic_output_shape_float16_3(int i) {
845 static std::set<int> ignore = {};
846 return ignore.find(i) != ignore.end();
847 }
848
849 void CreateModel_4(Model *model) {
850 OperandType type1(Type::INT32, {});
851 OperandType type2(Type::TENSOR_INT32, {2});
852 OperandType type5(Type::TENSOR_FLOAT32, {3});
853 OperandType type7(Type::TENSOR_FLOAT32, {2});
854 // Phase 1, operands
855 auto input03 = model->addOperand(&type5);
856 auto param6 = model->addOperand(&type1);
857 auto param7 = model->addOperand(&type2);
858 auto output03 = model->addOperand(&type7);
859 // Phase 2, operations
860 static int32_t param6_init[] = {0};
861 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
862 static int32_t param7_init[] = {1, 0};
863 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
864 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
865 // Phase 3, inputs and outputs
866 model->identifyInputsAndOutputs(
867 {input03},
868 {output03});
869 assert(model->isValid());
870 }
871
872 inline bool is_ignored_4(int i) {
873 static std::set<int> ignore = {};
874 return ignore.find(i) != ignore.end();
875 }
876
877 void CreateModel_relaxed_4(Model *model) {
878 OperandType type1(Type::INT32, {});
879 OperandType type2(Type::TENSOR_INT32, {2});
880 OperandType type5(Type::TENSOR_FLOAT32, {3});
881 OperandType type7(Type::TENSOR_FLOAT32, {2});
882 // Phase 1, operands
883 auto input03 = model->addOperand(&type5);
884 auto param6 = model->addOperand(&type1);
885 auto param7 = model->addOperand(&type2);
886 auto output03 = model->addOperand(&type7);
887 // Phase 2, operations
888 static int32_t param6_init[] = {0};
889 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
890 static int32_t param7_init[] = {1, 0};
891 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
892 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
893 // Phase 3, inputs and outputs
894 model->identifyInputsAndOutputs(
895 {input03},
896 {output03});
897 // Phase 4: set relaxed execution
898 model->relaxComputationFloat32toFloat16(true);
899 assert(model->isValid());
900 }
901
902 inline bool is_ignored_relaxed_4(int i) {
903 static std::set<int> ignore = {};
904 return ignore.find(i) != ignore.end();
905 }
906
907 void CreateModel_quant8_4(Model *model) {
908 OperandType type1(Type::INT32, {});
909 OperandType type2(Type::TENSOR_INT32, {2});
910 OperandType type23(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
911 OperandType type32(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 127);
912 // Phase 1, operands
913 auto input03 = model->addOperand(&type23);
914 auto param6 = model->addOperand(&type1);
915 auto param7 = model->addOperand(&type2);
916 auto output03 = model->addOperand(&type32);
917 // Phase 2, operations
918 static int32_t param6_init[] = {0};
919 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
920 static int32_t param7_init[] = {1, 0};
921 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
922 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
923 // Phase 3, inputs and outputs
924 model->identifyInputsAndOutputs(
925 {input03},
926 {output03});
927 assert(model->isValid());
928 }
929
930 inline bool is_ignored_quant8_4(int i) {
931 static std::set<int> ignore = {};
932 return ignore.find(i) != ignore.end();
933 }
934
935 void CreateModel_int32_4(Model *model) {
936 OperandType type1(Type::INT32, {});
937 OperandType type2(Type::TENSOR_INT32, {2});
938 OperandType type25(Type::TENSOR_INT32, {3});
939 // Phase 1, operands
940 auto input03 = model->addOperand(&type25);
941 auto param6 = model->addOperand(&type1);
942 auto param7 = model->addOperand(&type2);
943 auto output03 = model->addOperand(&type2);
944 // Phase 2, operations
945 static int32_t param6_init[] = {0};
946 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
947 static int32_t param7_init[] = {1, 0};
948 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
949 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
950 // Phase 3, inputs and outputs
951 model->identifyInputsAndOutputs(
952 {input03},
953 {output03});
954 assert(model->isValid());
955 }
956
957 inline bool is_ignored_int32_4(int i) {
958 static std::set<int> ignore = {};
959 return ignore.find(i) != ignore.end();
960 }
961
962 void CreateModel_float16_4(Model *model) {
963 OperandType type1(Type::INT32, {});
964 OperandType type2(Type::TENSOR_INT32, {2});
965 OperandType type26(Type::TENSOR_FLOAT16, {3});
966 OperandType type33(Type::TENSOR_FLOAT16, {2});
967 // Phase 1, operands
968 auto input03 = model->addOperand(&type26);
969 auto param6 = model->addOperand(&type1);
970 auto param7 = model->addOperand(&type2);
971 auto output03 = model->addOperand(&type33);
972 // Phase 2, operations
973 static int32_t param6_init[] = {0};
974 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
975 static int32_t param7_init[] = {1, 0};
976 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
977 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
978 // Phase 3, inputs and outputs
979 model->identifyInputsAndOutputs(
980 {input03},
981 {output03});
982 assert(model->isValid());
983 }
984
985 inline bool is_ignored_float16_4(int i) {
986 static std::set<int> ignore = {};
987 return ignore.find(i) != ignore.end();
988 }
989
990 void CreateModel_dynamic_output_shape_4(Model *model) {
991 OperandType type1(Type::INT32, {});
992 OperandType type2(Type::TENSOR_INT32, {2});
993 OperandType type28(Type::TENSOR_FLOAT32, {0});
994 OperandType type5(Type::TENSOR_FLOAT32, {3});
995 // Phase 1, operands
996 auto input03 = model->addOperand(&type5);
997 auto param6 = model->addOperand(&type1);
998 auto param7 = model->addOperand(&type2);
999 auto output03 = model->addOperand(&type28);
1000 // Phase 2, operations
1001 static int32_t param6_init[] = {0};
1002 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
1003 static int32_t param7_init[] = {1, 0};
1004 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
1005 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
1006 // Phase 3, inputs and outputs
1007 model->identifyInputsAndOutputs(
1008 {input03},
1009 {output03});
1010 assert(model->isValid());
1011 }
1012
1013 inline bool is_ignored_dynamic_output_shape_4(int i) {
1014 static std::set<int> ignore = {};
1015 return ignore.find(i) != ignore.end();
1016 }
1017
1018 void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
1019 OperandType type1(Type::INT32, {});
1020 OperandType type2(Type::TENSOR_INT32, {2});
1021 OperandType type28(Type::TENSOR_FLOAT32, {0});
1022 OperandType type5(Type::TENSOR_FLOAT32, {3});
1023 // Phase 1, operands
1024 auto input03 = model->addOperand(&type5);
1025 auto param6 = model->addOperand(&type1);
1026 auto param7 = model->addOperand(&type2);
1027 auto output03 = model->addOperand(&type28);
1028 // Phase 2, operations
1029 static int32_t param6_init[] = {0};
1030 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
1031 static int32_t param7_init[] = {1, 0};
1032 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
1033 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
1034 // Phase 3, inputs and outputs
1035 model->identifyInputsAndOutputs(
1036 {input03},
1037 {output03});
1038 // Phase 4: set relaxed execution
1039 model->relaxComputationFloat32toFloat16(true);
1040 assert(model->isValid());
1041 }
1042
1043 inline bool is_ignored_dynamic_output_shape_relaxed_4(int i) {
1044 static std::set<int> ignore = {};
1045 return ignore.find(i) != ignore.end();
1046 }
1047
1048 void CreateModel_dynamic_output_shape_quant8_4(Model *model) {
1049 OperandType type1(Type::INT32, {});
1050 OperandType type2(Type::TENSOR_INT32, {2});
1051 OperandType type23(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
1052 OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 127);
1053 // Phase 1, operands
1054 auto input03 = model->addOperand(&type23);
1055 auto param6 = model->addOperand(&type1);
1056 auto param7 = model->addOperand(&type2);
1057 auto output03 = model->addOperand(&type29);
1058 // Phase 2, operations
1059 static int32_t param6_init[] = {0};
1060 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
1061 static int32_t param7_init[] = {1, 0};
1062 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
1063 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
1064 // Phase 3, inputs and outputs
1065 model->identifyInputsAndOutputs(
1066 {input03},
1067 {output03});
1068 assert(model->isValid());
1069 }
1070
1071 inline bool is_ignored_dynamic_output_shape_quant8_4(int i) {
1072 static std::set<int> ignore = {};
1073 return ignore.find(i) != ignore.end();
1074 }
1075
1076 void CreateModel_dynamic_output_shape_int32_4(Model *model) {
1077 OperandType type1(Type::INT32, {});
1078 OperandType type2(Type::TENSOR_INT32, {2});
1079 OperandType type25(Type::TENSOR_INT32, {3});
1080 OperandType type30(Type::TENSOR_INT32, {0});
1081 // Phase 1, operands
1082 auto input03 = model->addOperand(&type25);
1083 auto param6 = model->addOperand(&type1);
1084 auto param7 = model->addOperand(&type2);
1085 auto output03 = model->addOperand(&type30);
1086 // Phase 2, operations
1087 static int32_t param6_init[] = {0};
1088 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
1089 static int32_t param7_init[] = {1, 0};
1090 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
1091 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
1092 // Phase 3, inputs and outputs
1093 model->identifyInputsAndOutputs(
1094 {input03},
1095 {output03});
1096 assert(model->isValid());
1097 }
1098
1099 inline bool is_ignored_dynamic_output_shape_int32_4(int i) {
1100 static std::set<int> ignore = {};
1101 return ignore.find(i) != ignore.end();
1102 }
1103
1104 void CreateModel_dynamic_output_shape_float16_4(Model *model) {
1105 OperandType type1(Type::INT32, {});
1106 OperandType type2(Type::TENSOR_INT32, {2});
1107 OperandType type26(Type::TENSOR_FLOAT16, {3});
1108 OperandType type31(Type::TENSOR_FLOAT16, {0});
1109 // Phase 1, operands
1110 auto input03 = model->addOperand(&type26);
1111 auto param6 = model->addOperand(&type1);
1112 auto param7 = model->addOperand(&type2);
1113 auto output03 = model->addOperand(&type31);
1114 // Phase 2, operations
1115 static int32_t param6_init[] = {0};
1116 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 1);
1117 static int32_t param7_init[] = {1, 0};
1118 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 2);
1119 model->addOperation(ANEURALNETWORKS_GATHER, {input03, param6, param7}, {output03});
1120 // Phase 3, inputs and outputs
1121 model->identifyInputsAndOutputs(
1122 {input03},
1123 {output03});
1124 assert(model->isValid());
1125 }
1126
1127 inline bool is_ignored_dynamic_output_shape_float16_4(int i) {
1128 static std::set<int> ignore = {};
1129 return ignore.find(i) != ignore.end();
1130 }
1131
1132 void CreateModel_5(Model *model) {
1133 OperandType type1(Type::INT32, {});
1134 OperandType type2(Type::TENSOR_INT32, {2});
1135 OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 2});
1136 OperandType type9(Type::TENSOR_FLOAT32, {2, 2, 2});
1137 // Phase 1, operands
1138 auto input04 = model->addOperand(&type8);
1139 auto param8 = model->addOperand(&type1);
1140 auto param9 = model->addOperand(&type2);
1141 auto output04 = model->addOperand(&type9);
1142 // Phase 2, operations
1143 static int32_t param8_init[] = {0};
1144 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1145 static int32_t param9_init[] = {0, 0};
1146 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1147 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1148 // Phase 3, inputs and outputs
1149 model->identifyInputsAndOutputs(
1150 {input04},
1151 {output04});
1152 assert(model->isValid());
1153 }
1154
1155 inline bool is_ignored_5(int i) {
1156 static std::set<int> ignore = {};
1157 return ignore.find(i) != ignore.end();
1158 }
1159
1160 void CreateModel_relaxed_5(Model *model) {
1161 OperandType type1(Type::INT32, {});
1162 OperandType type2(Type::TENSOR_INT32, {2});
1163 OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 2});
1164 OperandType type9(Type::TENSOR_FLOAT32, {2, 2, 2});
1165 // Phase 1, operands
1166 auto input04 = model->addOperand(&type8);
1167 auto param8 = model->addOperand(&type1);
1168 auto param9 = model->addOperand(&type2);
1169 auto output04 = model->addOperand(&type9);
1170 // Phase 2, operations
1171 static int32_t param8_init[] = {0};
1172 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1173 static int32_t param9_init[] = {0, 0};
1174 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1175 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1176 // Phase 3, inputs and outputs
1177 model->identifyInputsAndOutputs(
1178 {input04},
1179 {output04});
1180 // Phase 4: set relaxed execution
1181 model->relaxComputationFloat32toFloat16(true);
1182 assert(model->isValid());
1183 }
1184
1185 inline bool is_ignored_relaxed_5(int i) {
1186 static std::set<int> ignore = {};
1187 return ignore.find(i) != ignore.end();
1188 }
1189
1190 void CreateModel_quant8_5(Model *model) {
1191 OperandType type1(Type::INT32, {});
1192 OperandType type2(Type::TENSOR_INT32, {2});
1193 OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2}, 0.5f, 127);
1194 OperandType type35(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2}, 0.5f, 127);
1195 // Phase 1, operands
1196 auto input04 = model->addOperand(&type34);
1197 auto param8 = model->addOperand(&type1);
1198 auto param9 = model->addOperand(&type2);
1199 auto output04 = model->addOperand(&type35);
1200 // Phase 2, operations
1201 static int32_t param8_init[] = {0};
1202 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1203 static int32_t param9_init[] = {0, 0};
1204 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1205 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1206 // Phase 3, inputs and outputs
1207 model->identifyInputsAndOutputs(
1208 {input04},
1209 {output04});
1210 assert(model->isValid());
1211 }
1212
1213 inline bool is_ignored_quant8_5(int i) {
1214 static std::set<int> ignore = {};
1215 return ignore.find(i) != ignore.end();
1216 }
1217
1218 void CreateModel_int32_5(Model *model) {
1219 OperandType type1(Type::INT32, {});
1220 OperandType type2(Type::TENSOR_INT32, {2});
1221 OperandType type36(Type::TENSOR_INT32, {1, 2, 2});
1222 OperandType type37(Type::TENSOR_INT32, {2, 2, 2});
1223 // Phase 1, operands
1224 auto input04 = model->addOperand(&type36);
1225 auto param8 = model->addOperand(&type1);
1226 auto param9 = model->addOperand(&type2);
1227 auto output04 = model->addOperand(&type37);
1228 // Phase 2, operations
1229 static int32_t param8_init[] = {0};
1230 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1231 static int32_t param9_init[] = {0, 0};
1232 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1233 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1234 // Phase 3, inputs and outputs
1235 model->identifyInputsAndOutputs(
1236 {input04},
1237 {output04});
1238 assert(model->isValid());
1239 }
1240
1241 inline bool is_ignored_int32_5(int i) {
1242 static std::set<int> ignore = {};
1243 return ignore.find(i) != ignore.end();
1244 }
1245
1246 void CreateModel_float16_5(Model *model) {
1247 OperandType type1(Type::INT32, {});
1248 OperandType type2(Type::TENSOR_INT32, {2});
1249 OperandType type38(Type::TENSOR_FLOAT16, {1, 2, 2});
1250 OperandType type39(Type::TENSOR_FLOAT16, {2, 2, 2});
1251 // Phase 1, operands
1252 auto input04 = model->addOperand(&type38);
1253 auto param8 = model->addOperand(&type1);
1254 auto param9 = model->addOperand(&type2);
1255 auto output04 = model->addOperand(&type39);
1256 // Phase 2, operations
1257 static int32_t param8_init[] = {0};
1258 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1259 static int32_t param9_init[] = {0, 0};
1260 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1261 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1262 // Phase 3, inputs and outputs
1263 model->identifyInputsAndOutputs(
1264 {input04},
1265 {output04});
1266 assert(model->isValid());
1267 }
1268
1269 inline bool is_ignored_float16_5(int i) {
1270 static std::set<int> ignore = {};
1271 return ignore.find(i) != ignore.end();
1272 }
1273
1274 void CreateModel_dynamic_output_shape_5(Model *model) {
1275 OperandType type1(Type::INT32, {});
1276 OperandType type2(Type::TENSOR_INT32, {2});
1277 OperandType type40(Type::TENSOR_FLOAT32, {0, 0, 0});
1278 OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 2});
1279 // Phase 1, operands
1280 auto input04 = model->addOperand(&type8);
1281 auto param8 = model->addOperand(&type1);
1282 auto param9 = model->addOperand(&type2);
1283 auto output04 = model->addOperand(&type40);
1284 // Phase 2, operations
1285 static int32_t param8_init[] = {0};
1286 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1287 static int32_t param9_init[] = {0, 0};
1288 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1289 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1290 // Phase 3, inputs and outputs
1291 model->identifyInputsAndOutputs(
1292 {input04},
1293 {output04});
1294 assert(model->isValid());
1295 }
1296
1297 inline bool is_ignored_dynamic_output_shape_5(int i) {
1298 static std::set<int> ignore = {};
1299 return ignore.find(i) != ignore.end();
1300 }
1301
1302 void CreateModel_dynamic_output_shape_relaxed_5(Model *model) {
1303 OperandType type1(Type::INT32, {});
1304 OperandType type2(Type::TENSOR_INT32, {2});
1305 OperandType type40(Type::TENSOR_FLOAT32, {0, 0, 0});
1306 OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 2});
1307 // Phase 1, operands
1308 auto input04 = model->addOperand(&type8);
1309 auto param8 = model->addOperand(&type1);
1310 auto param9 = model->addOperand(&type2);
1311 auto output04 = model->addOperand(&type40);
1312 // Phase 2, operations
1313 static int32_t param8_init[] = {0};
1314 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1315 static int32_t param9_init[] = {0, 0};
1316 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1317 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1318 // Phase 3, inputs and outputs
1319 model->identifyInputsAndOutputs(
1320 {input04},
1321 {output04});
1322 // Phase 4: set relaxed execution
1323 model->relaxComputationFloat32toFloat16(true);
1324 assert(model->isValid());
1325 }
1326
1327 inline bool is_ignored_dynamic_output_shape_relaxed_5(int i) {
1328 static std::set<int> ignore = {};
1329 return ignore.find(i) != ignore.end();
1330 }
1331
1332 void CreateModel_dynamic_output_shape_quant8_5(Model *model) {
1333 OperandType type1(Type::INT32, {});
1334 OperandType type2(Type::TENSOR_INT32, {2});
1335 OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2}, 0.5f, 127);
1336 OperandType type41(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
1337 // Phase 1, operands
1338 auto input04 = model->addOperand(&type34);
1339 auto param8 = model->addOperand(&type1);
1340 auto param9 = model->addOperand(&type2);
1341 auto output04 = model->addOperand(&type41);
1342 // Phase 2, operations
1343 static int32_t param8_init[] = {0};
1344 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1345 static int32_t param9_init[] = {0, 0};
1346 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1347 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1348 // Phase 3, inputs and outputs
1349 model->identifyInputsAndOutputs(
1350 {input04},
1351 {output04});
1352 assert(model->isValid());
1353 }
1354
1355 inline bool is_ignored_dynamic_output_shape_quant8_5(int i) {
1356 static std::set<int> ignore = {};
1357 return ignore.find(i) != ignore.end();
1358 }
1359
1360 void CreateModel_dynamic_output_shape_int32_5(Model *model) {
1361 OperandType type1(Type::INT32, {});
1362 OperandType type2(Type::TENSOR_INT32, {2});
1363 OperandType type36(Type::TENSOR_INT32, {1, 2, 2});
1364 OperandType type42(Type::TENSOR_INT32, {0, 0, 0});
1365 // Phase 1, operands
1366 auto input04 = model->addOperand(&type36);
1367 auto param8 = model->addOperand(&type1);
1368 auto param9 = model->addOperand(&type2);
1369 auto output04 = model->addOperand(&type42);
1370 // Phase 2, operations
1371 static int32_t param8_init[] = {0};
1372 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1373 static int32_t param9_init[] = {0, 0};
1374 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1375 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1376 // Phase 3, inputs and outputs
1377 model->identifyInputsAndOutputs(
1378 {input04},
1379 {output04});
1380 assert(model->isValid());
1381 }
1382
is_ignored_dynamic_output_shape_int32_5(int i)1383 inline bool is_ignored_dynamic_output_shape_int32_5(int i) {
1384 static std::set<int> ignore = {};
1385 return ignore.find(i) != ignore.end();
1386 }
1387
CreateModel_dynamic_output_shape_float16_5(Model * model)1388 void CreateModel_dynamic_output_shape_float16_5(Model *model) {
1389 OperandType type1(Type::INT32, {});
1390 OperandType type2(Type::TENSOR_INT32, {2});
1391 OperandType type38(Type::TENSOR_FLOAT16, {1, 2, 2});
1392 OperandType type43(Type::TENSOR_FLOAT16, {0, 0, 0});
1393 // Phase 1, operands
1394 auto input04 = model->addOperand(&type38);
1395 auto param8 = model->addOperand(&type1);
1396 auto param9 = model->addOperand(&type2);
1397 auto output04 = model->addOperand(&type43);
1398 // Phase 2, operations
1399 static int32_t param8_init[] = {0};
1400 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
1401 static int32_t param9_init[] = {0, 0};
1402 model->setOperandValue(param9, param9_init, sizeof(int32_t) * 2);
1403 model->addOperation(ANEURALNETWORKS_GATHER, {input04, param8, param9}, {output04});
1404 // Phase 3, inputs and outputs
1405 model->identifyInputsAndOutputs(
1406 {input04},
1407 {output04});
1408 assert(model->isValid());
1409 }
1410
is_ignored_dynamic_output_shape_float16_5(int i)1411 inline bool is_ignored_dynamic_output_shape_float16_5(int i) {
1412 static std::set<int> ignore = {};
1413 return ignore.find(i) != ignore.end();
1414 }
1415
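// Example 6 (editorial summary): GATHER on a {4, 1} float32 tensor along axis 0
// with indices {1, 3}, yielding a {2, 1} output. The functions below repeat the
// same graph for the relaxed, quant8, int32, float16, and dynamic-output-shape variants.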
void CreateModel_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 1});
  OperandType type11(Type::TENSOR_FLOAT32, {2, 1});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input05 = model->addOperand(&type10);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 1});
  OperandType type11(Type::TENSOR_FLOAT32, {2, 1});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input05 = model->addOperand(&type10);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type44(Type::TENSOR_QUANT8_ASYMM, {4, 1}, 0.5f, 127);
  OperandType type45(Type::TENSOR_QUANT8_ASYMM, {2, 1}, 0.5f, 127);
  // Phase 1, operands
  auto input05 = model->addOperand(&type44);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type45);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_quant8_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_int32_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type46(Type::TENSOR_INT32, {4, 1});
  OperandType type47(Type::TENSOR_INT32, {2, 1});
  // Phase 1, operands
  auto input05 = model->addOperand(&type46);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type47);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_int32_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type48(Type::TENSOR_FLOAT16, {4, 1});
  OperandType type49(Type::TENSOR_FLOAT16, {2, 1});
  // Phase 1, operands
  auto input05 = model->addOperand(&type48);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type49);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_float16_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 1});
  OperandType type16(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input05 = model->addOperand(&type10);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type16);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 1});
  OperandType type16(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input05 = model->addOperand(&type10);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type16);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.5f, 127);
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type44(Type::TENSOR_QUANT8_ASYMM, {4, 1}, 0.5f, 127);
  // Phase 1, operands
  auto input05 = model->addOperand(&type44);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type17);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_int32_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type18(Type::TENSOR_INT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type46(Type::TENSOR_INT32, {4, 1});
  // Phase 1, operands
  auto input05 = model->addOperand(&type46);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type18);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_int32_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_6(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type19(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type48(Type::TENSOR_FLOAT16, {4, 1});
  // Phase 1, operands
  auto input05 = model->addOperand(&type48);
  auto param10 = model->addOperand(&type1);
  auto param11 = model->addOperand(&type2);
  auto output05 = model->addOperand(&type19);
  // Phase 2, operations
  static int32_t param10_init[] = {0};
  model->setOperandValue(param10, param10_init, sizeof(int32_t) * 1);
  static int32_t param11_init[] = {1, 3};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input05, param10, param11}, {output05});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input05},
    {output05});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

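// Example 7 (editorial summary): GATHER on a {1, 2, 3} float32 tensor along axis 1
// with indices {1, 0}; the output keeps the {1, 2, 3} shape. The variants below
// mirror those of example 6.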
void CreateModel_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input06 = model->addOperand(&type12);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input06 = model->addOperand(&type12);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type50(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3}, 0.5f, 127);
  // Phase 1, operands
  auto input06 = model->addOperand(&type50);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type50);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_quant8_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_int32_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type51(Type::TENSOR_INT32, {1, 2, 3});
  // Phase 1, operands
  auto input06 = model->addOperand(&type51);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type51);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_int32_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type52(Type::TENSOR_FLOAT16, {1, 2, 3});
  // Phase 1, operands
  auto input06 = model->addOperand(&type52);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type52);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_float16_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type40(Type::TENSOR_FLOAT32, {0, 0, 0});
  // Phase 1, operands
  auto input06 = model->addOperand(&type12);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type40);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type40(Type::TENSOR_FLOAT32, {0, 0, 0});
  // Phase 1, operands
  auto input06 = model->addOperand(&type12);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type40);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type41(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
  OperandType type50(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3}, 0.5f, 127);
  // Phase 1, operands
  auto input06 = model->addOperand(&type50);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type41);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_int32_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type42(Type::TENSOR_INT32, {0, 0, 0});
  OperandType type51(Type::TENSOR_INT32, {1, 2, 3});
  // Phase 1, operands
  auto input06 = model->addOperand(&type51);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type42);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_int32_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_7(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type43(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type52(Type::TENSOR_FLOAT16, {1, 2, 3});
  // Phase 1, operands
  auto input06 = model->addOperand(&type52);
  auto param12 = model->addOperand(&type1);
  auto param13 = model->addOperand(&type2);
  auto output06 = model->addOperand(&type43);
  // Phase 2, operations
  static int32_t param12_init[] = {1};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static int32_t param13_init[] = {1, 0};
  model->setOperandValue(param13, param13_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input06, param12, param13}, {output06});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input06},
    {output06});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

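// Example 8 (editorial summary): GATHER on a {1, 2, 3} float32 tensor along axis -1
// (the last axis) with indices {2, 0}, yielding a {1, 2, 2} output. The variants
// below mirror those of the previous examples.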
void CreateModel_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 2});
  // Phase 1, operands
  auto input07 = model->addOperand(&type12);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type8(Type::TENSOR_FLOAT32, {1, 2, 2});
  // Phase 1, operands
  auto input07 = model->addOperand(&type12);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2}, 0.5f, 127);
  OperandType type50(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3}, 0.5f, 127);
  // Phase 1, operands
  auto input07 = model->addOperand(&type50);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type34);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_quant8_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_int32_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type36(Type::TENSOR_INT32, {1, 2, 2});
  OperandType type51(Type::TENSOR_INT32, {1, 2, 3});
  // Phase 1, operands
  auto input07 = model->addOperand(&type51);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type36);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_int32_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type38(Type::TENSOR_FLOAT16, {1, 2, 2});
  OperandType type52(Type::TENSOR_FLOAT16, {1, 2, 3});
  // Phase 1, operands
  auto input07 = model->addOperand(&type52);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type38);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_float16_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type40(Type::TENSOR_FLOAT32, {0, 0, 0});
  // Phase 1, operands
  auto input07 = model->addOperand(&type12);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type40);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type12(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type40(Type::TENSOR_FLOAT32, {0, 0, 0});
  // Phase 1, operands
  auto input07 = model->addOperand(&type12);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type40);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type41(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
  OperandType type50(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3}, 0.5f, 127);
  // Phase 1, operands
  auto input07 = model->addOperand(&type50);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type41);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_int32_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type42(Type::TENSOR_INT32, {0, 0, 0});
  OperandType type51(Type::TENSOR_INT32, {1, 2, 3});
  // Phase 1, operands
  auto input07 = model->addOperand(&type51);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type42);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_int32_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type43(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type52(Type::TENSOR_FLOAT16, {1, 2, 3});
  // Phase 1, operands
  auto input07 = model->addOperand(&type52);
  auto param14 = model->addOperand(&type1);
  auto param15 = model->addOperand(&type2);
  auto output07 = model->addOperand(&type43);
  // Phase 2, operations
  static int32_t param14_init[] = {-1};
  model->setOperandValue(param14, param14_init, sizeof(int32_t) * 1);
  static int32_t param15_init[] = {2, 0};
  model->setOperandValue(param15, param15_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_GATHER, {input07, param14, param15}, {output07});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input07},
    {output07});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}