1 // clang-format off
2 // Generated file (from: expand_dims.mod.py). Do not edit
// EXPAND_DIMS test model: FLOAT32 {2, 2} input expanded at constant axis 0
// into a FLOAT32 {1, 2, 2} output.
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type1);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
21
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
26
// Same EXPAND_DIMS model as CreateModel (axis 0, {2, 2} -> {1, 2, 2}),
// with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type1);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
47
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_relaxed(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
52
// EXPAND_DIMS at axis 0 on QUANT8_ASYMM data (scale 0.5, zeroPoint 127):
// {2, 2} input -> {1, 2, 2} output.
void CreateModel_quant8(Model *model) {
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
71
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_quant8(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
76
// EXPAND_DIMS at axis 0 on TENSOR_INT32 data: {2, 2} input -> {1, 2, 2} output.
void CreateModel_int32(Model *model) {
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  OperandType type8(Type::TENSOR_INT32, {1, 2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
95
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_int32(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
100
// EXPAND_DIMS at axis 0 on TENSOR_FLOAT16 data: {2, 2} input -> {1, 2, 2} output.
void CreateModel_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {1, 2, 2});
  OperandType type4(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type9);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type10);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
119
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_float16(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
124
// EXPAND_DIMS at axis 0, FLOAT32 {2, 2} input; output dimensions are left as
// {0, 0, 0} for the dynamic-output-shape variant (resolved at execution time).
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
143
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
148
// Dynamic-output-shape variant (output dims {0, 0, 0}) of the axis-0
// EXPAND_DIMS model, with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
169
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
174
// Dynamic-output-shape variant of the axis-0 EXPAND_DIMS model on
// QUANT8_ASYMM data (scale 0.5, zeroPoint 127); output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
193
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
198
// Dynamic-output-shape variant of the axis-0 EXPAND_DIMS model on
// TENSOR_INT32 data; output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_int32(Model *model) {
  OperandType type13(Type::TENSOR_INT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
217
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_int32(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
222
// Dynamic-output-shape variant of the axis-0 EXPAND_DIMS model on
// TENSOR_FLOAT16 data; output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type9);
  auto param = model->addOperand(&type4);
  auto output = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}
241
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
246
// EXPAND_DIMS test model: FLOAT32 {2, 2} input expanded at constant axis 1
// into a FLOAT32 {2, 1, 2} output.
void CreateModel_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {2, 1, 2});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
265
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
270
// Same EXPAND_DIMS model as CreateModel_2 (axis 1, {2, 2} -> {2, 1, 2}),
// with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_relaxed_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {2, 1, 2});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
291
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_relaxed_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
296
// EXPAND_DIMS at axis 1 on QUANT8_ASYMM data (scale 0.5, zeroPoint 127):
// {2, 2} input -> {2, 1, 2} output.
void CreateModel_quant8_2(Model *model) {
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.5f, 127);
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
315
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_quant8_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
320
// EXPAND_DIMS at axis 1 on TENSOR_INT32 data: {2, 2} input -> {2, 1, 2} output.
void CreateModel_int32_2(Model *model) {
  OperandType type16(Type::TENSOR_INT32, {2, 1, 2});
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type16);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
339
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_int32_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
344
// EXPAND_DIMS at axis 1 on TENSOR_FLOAT16 data: {2, 2} input -> {2, 1, 2} output.
void CreateModel_float16_2(Model *model) {
  OperandType type17(Type::TENSOR_FLOAT16, {2, 1, 2});
  OperandType type4(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type9);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type17);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
363
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_float16_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
368
// Dynamic-output-shape variant of the axis-1 EXPAND_DIMS model
// (FLOAT32 {2, 2} input); output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
387
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
392
// Dynamic-output-shape variant (output dims {0, 0, 0}) of the axis-1
// EXPAND_DIMS model, with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
413
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
418
// Dynamic-output-shape variant of the axis-1 EXPAND_DIMS model on
// QUANT8_ASYMM data (scale 0.5, zeroPoint 127); output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
437
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
442
// Dynamic-output-shape variant of the axis-1 EXPAND_DIMS model on
// TENSOR_INT32 data; output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_int32_2(Model *model) {
  OperandType type13(Type::TENSOR_INT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
461
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_int32_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
466
// Dynamic-output-shape variant of the axis-1 EXPAND_DIMS model on
// TENSOR_FLOAT16 data; output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_float16_2(Model *model) {
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type9);
  auto param1 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param1_init[] = {1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output1});
  assert(model->isValid());
}
485
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
490
// EXPAND_DIMS test model: FLOAT32 {2, 2} input expanded at constant axis 2
// into a FLOAT32 {2, 2, 1} output.
void CreateModel_3(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type3(Type::TENSOR_FLOAT32, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
509
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
514
// Same EXPAND_DIMS model as CreateModel_3 (axis 2, {2, 2} -> {2, 2, 1}),
// with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_relaxed_3(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type3(Type::TENSOR_FLOAT32, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
535
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_relaxed_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
540
// EXPAND_DIMS at axis 2 on QUANT8_ASYMM data (scale 0.5, zeroPoint 127):
// {2, 2} input -> {2, 2, 1} output.
void CreateModel_quant8_3(Model *model) {
  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {2, 2, 1}, 0.5f, 127);
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type18);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
559
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_quant8_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
564
// EXPAND_DIMS at axis 2 on TENSOR_INT32 data: {2, 2} input -> {2, 2, 1} output.
void CreateModel_int32_3(Model *model) {
  OperandType type19(Type::TENSOR_INT32, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type19);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
583
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_int32_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
588
// EXPAND_DIMS at axis 2 on TENSOR_FLOAT16 data: {2, 2} input -> {2, 2, 1} output.
void CreateModel_float16_3(Model *model) {
  OperandType type20(Type::TENSOR_FLOAT16, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type9);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type20);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
607
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_float16_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
612
// Dynamic-output-shape variant of the axis-2 EXPAND_DIMS model
// (FLOAT32 {2, 2} input); output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_3(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
631
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
636
// Dynamic-output-shape variant (output dims {0, 0, 0}) of the axis-2
// EXPAND_DIMS model, with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
657
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_relaxed_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
662
// Dynamic-output-shape variant of the axis-2 EXPAND_DIMS model on
// QUANT8_ASYMM data (scale 0.5, zeroPoint 127); output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_quant8_3(Model *model) {
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
681
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_quant8_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
686
// Dynamic-output-shape variant of the axis-2 EXPAND_DIMS model on
// TENSOR_INT32 data; output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_int32_3(Model *model) {
  OperandType type13(Type::TENSOR_INT32, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
705
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_int32_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
710
// Dynamic-output-shape variant of the axis-2 EXPAND_DIMS model on
// TENSOR_FLOAT16 data; output dims left as {0, 0, 0}.
void CreateModel_dynamic_output_shape_float16_3(Model *model) {
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type4(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type9);
  auto param2 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param2_init[] = {2};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param2}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
729
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_dynamic_output_shape_float16_3(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
734
// EXPAND_DIMS test model: FLOAT32 {2, 2} input expanded at constant axis -1
// (last position) into a FLOAT32 {2, 2, 1} output.
void CreateModel_4(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type3(Type::TENSOR_FLOAT32, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param3 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
753
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
758
// Same EXPAND_DIMS model as CreateModel_4 (axis -1, {2, 2} -> {2, 2, 1}),
// with relaxed FLOAT32-to-FLOAT16 computation enabled.
void CreateModel_relaxed_4(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type3(Type::TENSOR_FLOAT32, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param3 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
779
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_relaxed_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
784
// EXPAND_DIMS at axis -1 on QUANT8_ASYMM data (scale 0.5, zeroPoint 127):
// {2, 2} input -> {2, 2, 1} output.
void CreateModel_quant8_4(Model *model) {
  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {2, 2, 1}, 0.5f, 127);
  OperandType type4(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto param3 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type18);
  // Phase 2, operations
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
803
// Returns true when output index i must be skipped during result checking.
// This model variant ignores none of its outputs.
inline bool is_ignored_quant8_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
808
// EXPAND_DIMS at axis -1 on TENSOR_INT32 data: {2, 2} input -> {2, 2, 1} output.
void CreateModel_int32_4(Model *model) {
  OperandType type19(Type::TENSOR_INT32, {2, 2, 1});
  OperandType type4(Type::INT32, {});
  OperandType type7(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type4);
  auto output2 = model->addOperand(&type19);
  // Phase 2, operations
  static int32_t param3_init[] = {-1};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output2});
  assert(model->isValid());
}
827
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_int32_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
832
CreateModel_float16_4(Model * model)833 void CreateModel_float16_4(Model *model) {
834 OperandType type20(Type::TENSOR_FLOAT16, {2, 2, 1});
835 OperandType type4(Type::INT32, {});
836 OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
837 // Phase 1, operands
838 auto input0 = model->addOperand(&type9);
839 auto param3 = model->addOperand(&type4);
840 auto output2 = model->addOperand(&type20);
841 // Phase 2, operations
842 static int32_t param3_init[] = {-1};
843 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
844 model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
845 // Phase 3, inputs and outputs
846 model->identifyInputsAndOutputs(
847 {input0},
848 {output2});
849 assert(model->isValid());
850 }
851
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
856
CreateModel_dynamic_output_shape_4(Model * model)857 void CreateModel_dynamic_output_shape_4(Model *model) {
858 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
859 OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
860 OperandType type4(Type::INT32, {});
861 // Phase 1, operands
862 auto input0 = model->addOperand(&type0);
863 auto param3 = model->addOperand(&type4);
864 auto output2 = model->addOperand(&type11);
865 // Phase 2, operations
866 static int32_t param3_init[] = {-1};
867 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
868 model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
869 // Phase 3, inputs and outputs
870 model->identifyInputsAndOutputs(
871 {input0},
872 {output2});
873 assert(model->isValid());
874 }
875
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_dynamic_output_shape_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
880
CreateModel_dynamic_output_shape_relaxed_4(Model * model)881 void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
882 OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
883 OperandType type11(Type::TENSOR_FLOAT32, {0, 0, 0});
884 OperandType type4(Type::INT32, {});
885 // Phase 1, operands
886 auto input0 = model->addOperand(&type0);
887 auto param3 = model->addOperand(&type4);
888 auto output2 = model->addOperand(&type11);
889 // Phase 2, operations
890 static int32_t param3_init[] = {-1};
891 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
892 model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
893 // Phase 3, inputs and outputs
894 model->identifyInputsAndOutputs(
895 {input0},
896 {output2});
897 // Phase 4: set relaxed execution
898 model->relaxComputationFloat32toFloat16(true);
899 assert(model->isValid());
900 }
901
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_dynamic_output_shape_relaxed_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
906
CreateModel_dynamic_output_shape_quant8_4(Model * model)907 void CreateModel_dynamic_output_shape_quant8_4(Model *model) {
908 OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
909 OperandType type4(Type::INT32, {});
910 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.5f, 127);
911 // Phase 1, operands
912 auto input0 = model->addOperand(&type5);
913 auto param3 = model->addOperand(&type4);
914 auto output2 = model->addOperand(&type12);
915 // Phase 2, operations
916 static int32_t param3_init[] = {-1};
917 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
918 model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
919 // Phase 3, inputs and outputs
920 model->identifyInputsAndOutputs(
921 {input0},
922 {output2});
923 assert(model->isValid());
924 }
925
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_dynamic_output_shape_quant8_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
930
CreateModel_dynamic_output_shape_int32_4(Model * model)931 void CreateModel_dynamic_output_shape_int32_4(Model *model) {
932 OperandType type13(Type::TENSOR_INT32, {0, 0, 0});
933 OperandType type4(Type::INT32, {});
934 OperandType type7(Type::TENSOR_INT32, {2, 2});
935 // Phase 1, operands
936 auto input0 = model->addOperand(&type7);
937 auto param3 = model->addOperand(&type4);
938 auto output2 = model->addOperand(&type13);
939 // Phase 2, operations
940 static int32_t param3_init[] = {-1};
941 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
942 model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
943 // Phase 3, inputs and outputs
944 model->identifyInputsAndOutputs(
945 {input0},
946 {output2});
947 assert(model->isValid());
948 }
949
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_dynamic_output_shape_int32_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
954
CreateModel_dynamic_output_shape_float16_4(Model * model)955 void CreateModel_dynamic_output_shape_float16_4(Model *model) {
956 OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0});
957 OperandType type4(Type::INT32, {});
958 OperandType type9(Type::TENSOR_FLOAT16, {2, 2});
959 // Phase 1, operands
960 auto input0 = model->addOperand(&type9);
961 auto param3 = model->addOperand(&type4);
962 auto output2 = model->addOperand(&type14);
963 // Phase 2, operations
964 static int32_t param3_init[] = {-1};
965 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
966 model->addOperation(ANEURALNETWORKS_EXPAND_DIMS, {input0, param3}, {output2});
967 // Phase 3, inputs and outputs
968 model->identifyInputsAndOutputs(
969 {input0},
970 {output2});
971 assert(model->isValid());
972 }
973
// Returns true if output index i should be skipped when comparing results;
// no outputs are ignored for this test variant.
inline bool is_ignored_dynamic_output_shape_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
978
979