// clang-format off
// Generated file (from: reduce_max.mod.py). Do not edit
CreateModel(Model * model)3 void CreateModel(Model *model) {
4 OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
5 OperandType type1(Type::TENSOR_FLOAT32, {3});
6 OperandType type2(Type::TENSOR_INT32, {1});
7 OperandType type3(Type::BOOL, {});
8 // Phase 1, operands
9 auto input0 = model->addOperand(&type0);
10 auto param = model->addOperand(&type2);
11 auto param1 = model->addOperand(&type3);
12 auto output0 = model->addOperand(&type1);
13 // Phase 2, operations
14 static int32_t param_init[] = {-1};
15 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
16 static bool8 param1_init[] = {false};
17 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
18 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
19 // Phase 3, inputs and outputs
20 model->identifyInputsAndOutputs(
21 {input0},
22 {output0});
23 assert(model->isValid());
24 }
25
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
30
CreateModel_relaxed(Model * model)31 void CreateModel_relaxed(Model *model) {
32 OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
33 OperandType type1(Type::TENSOR_FLOAT32, {3});
34 OperandType type2(Type::TENSOR_INT32, {1});
35 OperandType type3(Type::BOOL, {});
36 // Phase 1, operands
37 auto input0 = model->addOperand(&type0);
38 auto param = model->addOperand(&type2);
39 auto param1 = model->addOperand(&type3);
40 auto output0 = model->addOperand(&type1);
41 // Phase 2, operations
42 static int32_t param_init[] = {-1};
43 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
44 static bool8 param1_init[] = {false};
45 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
46 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
47 // Phase 3, inputs and outputs
48 model->identifyInputsAndOutputs(
49 {input0},
50 {output0});
51 // Phase 4: set relaxed execution
52 model->relaxComputationFloat32toFloat16(true);
53 assert(model->isValid());
54 }
55
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
60
CreateModel_float16(Model * model)61 void CreateModel_float16(Model *model) {
62 OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
63 OperandType type11(Type::TENSOR_FLOAT16, {3});
64 OperandType type2(Type::TENSOR_INT32, {1});
65 OperandType type3(Type::BOOL, {});
66 // Phase 1, operands
67 auto input0 = model->addOperand(&type10);
68 auto param = model->addOperand(&type2);
69 auto param1 = model->addOperand(&type3);
70 auto output0 = model->addOperand(&type11);
71 // Phase 2, operations
72 static int32_t param_init[] = {-1};
73 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
74 static bool8 param1_init[] = {false};
75 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
76 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
77 // Phase 3, inputs and outputs
78 model->identifyInputsAndOutputs(
79 {input0},
80 {output0});
81 assert(model->isValid());
82 }
83
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
88
CreateModel_quant8(Model * model)89 void CreateModel_quant8(Model *model) {
90 OperandType type12(Type::TENSOR_QUANT8_ASYMM, {3, 2}, 0.5f, 127);
91 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
92 OperandType type2(Type::TENSOR_INT32, {1});
93 OperandType type3(Type::BOOL, {});
94 // Phase 1, operands
95 auto input0 = model->addOperand(&type12);
96 auto param = model->addOperand(&type2);
97 auto param1 = model->addOperand(&type3);
98 auto output0 = model->addOperand(&type13);
99 // Phase 2, operations
100 static int32_t param_init[] = {-1};
101 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
102 static bool8 param1_init[] = {false};
103 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
104 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
105 // Phase 3, inputs and outputs
106 model->identifyInputsAndOutputs(
107 {input0},
108 {output0});
109 assert(model->isValid());
110 }
111
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
116
CreateModel_dynamic_output_shape(Model * model)117 void CreateModel_dynamic_output_shape(Model *model) {
118 OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
119 OperandType type14(Type::TENSOR_FLOAT32, {0});
120 OperandType type2(Type::TENSOR_INT32, {1});
121 OperandType type3(Type::BOOL, {});
122 // Phase 1, operands
123 auto input0 = model->addOperand(&type0);
124 auto param = model->addOperand(&type2);
125 auto param1 = model->addOperand(&type3);
126 auto output0 = model->addOperand(&type14);
127 // Phase 2, operations
128 static int32_t param_init[] = {-1};
129 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
130 static bool8 param1_init[] = {false};
131 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
132 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
133 // Phase 3, inputs and outputs
134 model->identifyInputsAndOutputs(
135 {input0},
136 {output0});
137 assert(model->isValid());
138 }
139
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
144
CreateModel_dynamic_output_shape_relaxed(Model * model)145 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
146 OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
147 OperandType type14(Type::TENSOR_FLOAT32, {0});
148 OperandType type2(Type::TENSOR_INT32, {1});
149 OperandType type3(Type::BOOL, {});
150 // Phase 1, operands
151 auto input0 = model->addOperand(&type0);
152 auto param = model->addOperand(&type2);
153 auto param1 = model->addOperand(&type3);
154 auto output0 = model->addOperand(&type14);
155 // Phase 2, operations
156 static int32_t param_init[] = {-1};
157 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
158 static bool8 param1_init[] = {false};
159 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
160 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
161 // Phase 3, inputs and outputs
162 model->identifyInputsAndOutputs(
163 {input0},
164 {output0});
165 // Phase 4: set relaxed execution
166 model->relaxComputationFloat32toFloat16(true);
167 assert(model->isValid());
168 }
169
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
174
CreateModel_dynamic_output_shape_float16(Model * model)175 void CreateModel_dynamic_output_shape_float16(Model *model) {
176 OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
177 OperandType type15(Type::TENSOR_FLOAT16, {0});
178 OperandType type2(Type::TENSOR_INT32, {1});
179 OperandType type3(Type::BOOL, {});
180 // Phase 1, operands
181 auto input0 = model->addOperand(&type10);
182 auto param = model->addOperand(&type2);
183 auto param1 = model->addOperand(&type3);
184 auto output0 = model->addOperand(&type15);
185 // Phase 2, operations
186 static int32_t param_init[] = {-1};
187 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
188 static bool8 param1_init[] = {false};
189 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
190 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
191 // Phase 3, inputs and outputs
192 model->identifyInputsAndOutputs(
193 {input0},
194 {output0});
195 assert(model->isValid());
196 }
197
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
202
CreateModel_dynamic_output_shape_quant8(Model * model)203 void CreateModel_dynamic_output_shape_quant8(Model *model) {
204 OperandType type12(Type::TENSOR_QUANT8_ASYMM, {3, 2}, 0.5f, 127);
205 OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 127);
206 OperandType type2(Type::TENSOR_INT32, {1});
207 OperandType type3(Type::BOOL, {});
208 // Phase 1, operands
209 auto input0 = model->addOperand(&type12);
210 auto param = model->addOperand(&type2);
211 auto param1 = model->addOperand(&type3);
212 auto output0 = model->addOperand(&type16);
213 // Phase 2, operations
214 static int32_t param_init[] = {-1};
215 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
216 static bool8 param1_init[] = {false};
217 model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
218 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input0, param, param1}, {output0});
219 // Phase 3, inputs and outputs
220 model->identifyInputsAndOutputs(
221 {input0},
222 {output0});
223 assert(model->isValid());
224 }
225
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
230
CreateModel_2(Model * model)231 void CreateModel_2(Model *model) {
232 OperandType type2(Type::TENSOR_INT32, {1});
233 OperandType type3(Type::BOOL, {});
234 OperandType type4(Type::TENSOR_FLOAT32, {1});
235 // Phase 1, operands
236 auto input01 = model->addOperand(&type4);
237 auto param2 = model->addOperand(&type2);
238 auto param3 = model->addOperand(&type3);
239 auto output01 = model->addOperand(&type4);
240 // Phase 2, operations
241 static int32_t param2_init[] = {0};
242 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
243 static bool8 param3_init[] = {true};
244 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
245 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
246 // Phase 3, inputs and outputs
247 model->identifyInputsAndOutputs(
248 {input01},
249 {output01});
250 assert(model->isValid());
251 }
252
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
257
CreateModel_relaxed_2(Model * model)258 void CreateModel_relaxed_2(Model *model) {
259 OperandType type2(Type::TENSOR_INT32, {1});
260 OperandType type3(Type::BOOL, {});
261 OperandType type4(Type::TENSOR_FLOAT32, {1});
262 // Phase 1, operands
263 auto input01 = model->addOperand(&type4);
264 auto param2 = model->addOperand(&type2);
265 auto param3 = model->addOperand(&type3);
266 auto output01 = model->addOperand(&type4);
267 // Phase 2, operations
268 static int32_t param2_init[] = {0};
269 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
270 static bool8 param3_init[] = {true};
271 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
272 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
273 // Phase 3, inputs and outputs
274 model->identifyInputsAndOutputs(
275 {input01},
276 {output01});
277 // Phase 4: set relaxed execution
278 model->relaxComputationFloat32toFloat16(true);
279 assert(model->isValid());
280 }
281
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
286
CreateModel_float16_2(Model * model)287 void CreateModel_float16_2(Model *model) {
288 OperandType type17(Type::TENSOR_FLOAT16, {1});
289 OperandType type2(Type::TENSOR_INT32, {1});
290 OperandType type3(Type::BOOL, {});
291 // Phase 1, operands
292 auto input01 = model->addOperand(&type17);
293 auto param2 = model->addOperand(&type2);
294 auto param3 = model->addOperand(&type3);
295 auto output01 = model->addOperand(&type17);
296 // Phase 2, operations
297 static int32_t param2_init[] = {0};
298 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
299 static bool8 param3_init[] = {true};
300 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
301 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
302 // Phase 3, inputs and outputs
303 model->identifyInputsAndOutputs(
304 {input01},
305 {output01});
306 assert(model->isValid());
307 }
308
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
313
CreateModel_quant8_2(Model * model)314 void CreateModel_quant8_2(Model *model) {
315 OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1}, 0.5f, 127);
316 OperandType type2(Type::TENSOR_INT32, {1});
317 OperandType type3(Type::BOOL, {});
318 // Phase 1, operands
319 auto input01 = model->addOperand(&type18);
320 auto param2 = model->addOperand(&type2);
321 auto param3 = model->addOperand(&type3);
322 auto output01 = model->addOperand(&type18);
323 // Phase 2, operations
324 static int32_t param2_init[] = {0};
325 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
326 static bool8 param3_init[] = {true};
327 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
328 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
329 // Phase 3, inputs and outputs
330 model->identifyInputsAndOutputs(
331 {input01},
332 {output01});
333 assert(model->isValid());
334 }
335
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
340
CreateModel_dynamic_output_shape_2(Model * model)341 void CreateModel_dynamic_output_shape_2(Model *model) {
342 OperandType type14(Type::TENSOR_FLOAT32, {0});
343 OperandType type2(Type::TENSOR_INT32, {1});
344 OperandType type3(Type::BOOL, {});
345 OperandType type4(Type::TENSOR_FLOAT32, {1});
346 // Phase 1, operands
347 auto input01 = model->addOperand(&type4);
348 auto param2 = model->addOperand(&type2);
349 auto param3 = model->addOperand(&type3);
350 auto output01 = model->addOperand(&type14);
351 // Phase 2, operations
352 static int32_t param2_init[] = {0};
353 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
354 static bool8 param3_init[] = {true};
355 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
356 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
357 // Phase 3, inputs and outputs
358 model->identifyInputsAndOutputs(
359 {input01},
360 {output01});
361 assert(model->isValid());
362 }
363
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
368
CreateModel_dynamic_output_shape_relaxed_2(Model * model)369 void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
370 OperandType type14(Type::TENSOR_FLOAT32, {0});
371 OperandType type2(Type::TENSOR_INT32, {1});
372 OperandType type3(Type::BOOL, {});
373 OperandType type4(Type::TENSOR_FLOAT32, {1});
374 // Phase 1, operands
375 auto input01 = model->addOperand(&type4);
376 auto param2 = model->addOperand(&type2);
377 auto param3 = model->addOperand(&type3);
378 auto output01 = model->addOperand(&type14);
379 // Phase 2, operations
380 static int32_t param2_init[] = {0};
381 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
382 static bool8 param3_init[] = {true};
383 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
384 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
385 // Phase 3, inputs and outputs
386 model->identifyInputsAndOutputs(
387 {input01},
388 {output01});
389 // Phase 4: set relaxed execution
390 model->relaxComputationFloat32toFloat16(true);
391 assert(model->isValid());
392 }
393
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
398
CreateModel_dynamic_output_shape_float16_2(Model * model)399 void CreateModel_dynamic_output_shape_float16_2(Model *model) {
400 OperandType type15(Type::TENSOR_FLOAT16, {0});
401 OperandType type17(Type::TENSOR_FLOAT16, {1});
402 OperandType type2(Type::TENSOR_INT32, {1});
403 OperandType type3(Type::BOOL, {});
404 // Phase 1, operands
405 auto input01 = model->addOperand(&type17);
406 auto param2 = model->addOperand(&type2);
407 auto param3 = model->addOperand(&type3);
408 auto output01 = model->addOperand(&type15);
409 // Phase 2, operations
410 static int32_t param2_init[] = {0};
411 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
412 static bool8 param3_init[] = {true};
413 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
414 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
415 // Phase 3, inputs and outputs
416 model->identifyInputsAndOutputs(
417 {input01},
418 {output01});
419 assert(model->isValid());
420 }
421
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
426
CreateModel_dynamic_output_shape_quant8_2(Model * model)427 void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
428 OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 127);
429 OperandType type18(Type::TENSOR_QUANT8_ASYMM, {1}, 0.5f, 127);
430 OperandType type2(Type::TENSOR_INT32, {1});
431 OperandType type3(Type::BOOL, {});
432 // Phase 1, operands
433 auto input01 = model->addOperand(&type18);
434 auto param2 = model->addOperand(&type2);
435 auto param3 = model->addOperand(&type3);
436 auto output01 = model->addOperand(&type16);
437 // Phase 2, operations
438 static int32_t param2_init[] = {0};
439 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
440 static bool8 param3_init[] = {true};
441 model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
442 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input01, param2, param3}, {output01});
443 // Phase 3, inputs and outputs
444 model->identifyInputsAndOutputs(
445 {input01},
446 {output01});
447 assert(model->isValid());
448 }
449
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
454
CreateModel_3(Model * model)455 void CreateModel_3(Model *model) {
456 OperandType type3(Type::BOOL, {});
457 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
458 OperandType type6(Type::TENSOR_FLOAT32, {2});
459 OperandType type7(Type::TENSOR_INT32, {4});
460 // Phase 1, operands
461 auto input02 = model->addOperand(&type5);
462 auto param4 = model->addOperand(&type7);
463 auto param5 = model->addOperand(&type3);
464 auto output02 = model->addOperand(&type6);
465 // Phase 2, operations
466 static int32_t param4_init[] = {1, 0, -3, -3};
467 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
468 static bool8 param5_init[] = {false};
469 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
470 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
471 // Phase 3, inputs and outputs
472 model->identifyInputsAndOutputs(
473 {input02},
474 {output02});
475 assert(model->isValid());
476 }
477
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
482
CreateModel_relaxed_3(Model * model)483 void CreateModel_relaxed_3(Model *model) {
484 OperandType type3(Type::BOOL, {});
485 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
486 OperandType type6(Type::TENSOR_FLOAT32, {2});
487 OperandType type7(Type::TENSOR_INT32, {4});
488 // Phase 1, operands
489 auto input02 = model->addOperand(&type5);
490 auto param4 = model->addOperand(&type7);
491 auto param5 = model->addOperand(&type3);
492 auto output02 = model->addOperand(&type6);
493 // Phase 2, operations
494 static int32_t param4_init[] = {1, 0, -3, -3};
495 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
496 static bool8 param5_init[] = {false};
497 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
498 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
499 // Phase 3, inputs and outputs
500 model->identifyInputsAndOutputs(
501 {input02},
502 {output02});
503 // Phase 4: set relaxed execution
504 model->relaxComputationFloat32toFloat16(true);
505 assert(model->isValid());
506 }
507
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
512
CreateModel_float16_3(Model * model)513 void CreateModel_float16_3(Model *model) {
514 OperandType type19(Type::TENSOR_FLOAT16, {4, 3, 2});
515 OperandType type20(Type::TENSOR_FLOAT16, {2});
516 OperandType type3(Type::BOOL, {});
517 OperandType type7(Type::TENSOR_INT32, {4});
518 // Phase 1, operands
519 auto input02 = model->addOperand(&type19);
520 auto param4 = model->addOperand(&type7);
521 auto param5 = model->addOperand(&type3);
522 auto output02 = model->addOperand(&type20);
523 // Phase 2, operations
524 static int32_t param4_init[] = {1, 0, -3, -3};
525 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
526 static bool8 param5_init[] = {false};
527 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
528 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
529 // Phase 3, inputs and outputs
530 model->identifyInputsAndOutputs(
531 {input02},
532 {output02});
533 assert(model->isValid());
534 }
535
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
540
CreateModel_quant8_3(Model * model)541 void CreateModel_quant8_3(Model *model) {
542 OperandType type21(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
543 OperandType type22(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 127);
544 OperandType type3(Type::BOOL, {});
545 OperandType type7(Type::TENSOR_INT32, {4});
546 // Phase 1, operands
547 auto input02 = model->addOperand(&type21);
548 auto param4 = model->addOperand(&type7);
549 auto param5 = model->addOperand(&type3);
550 auto output02 = model->addOperand(&type22);
551 // Phase 2, operations
552 static int32_t param4_init[] = {1, 0, -3, -3};
553 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
554 static bool8 param5_init[] = {false};
555 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
556 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
557 // Phase 3, inputs and outputs
558 model->identifyInputsAndOutputs(
559 {input02},
560 {output02});
561 assert(model->isValid());
562 }
563
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_quant8_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
568
CreateModel_dynamic_output_shape_3(Model * model)569 void CreateModel_dynamic_output_shape_3(Model *model) {
570 OperandType type14(Type::TENSOR_FLOAT32, {0});
571 OperandType type3(Type::BOOL, {});
572 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
573 OperandType type7(Type::TENSOR_INT32, {4});
574 // Phase 1, operands
575 auto input02 = model->addOperand(&type5);
576 auto param4 = model->addOperand(&type7);
577 auto param5 = model->addOperand(&type3);
578 auto output02 = model->addOperand(&type14);
579 // Phase 2, operations
580 static int32_t param4_init[] = {1, 0, -3, -3};
581 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
582 static bool8 param5_init[] = {false};
583 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
584 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
585 // Phase 3, inputs and outputs
586 model->identifyInputsAndOutputs(
587 {input02},
588 {output02});
589 assert(model->isValid());
590 }
591
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
596
CreateModel_dynamic_output_shape_relaxed_3(Model * model)597 void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
598 OperandType type14(Type::TENSOR_FLOAT32, {0});
599 OperandType type3(Type::BOOL, {});
600 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
601 OperandType type7(Type::TENSOR_INT32, {4});
602 // Phase 1, operands
603 auto input02 = model->addOperand(&type5);
604 auto param4 = model->addOperand(&type7);
605 auto param5 = model->addOperand(&type3);
606 auto output02 = model->addOperand(&type14);
607 // Phase 2, operations
608 static int32_t param4_init[] = {1, 0, -3, -3};
609 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
610 static bool8 param5_init[] = {false};
611 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
612 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
613 // Phase 3, inputs and outputs
614 model->identifyInputsAndOutputs(
615 {input02},
616 {output02});
617 // Phase 4: set relaxed execution
618 model->relaxComputationFloat32toFloat16(true);
619 assert(model->isValid());
620 }
621
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
626
CreateModel_dynamic_output_shape_float16_3(Model * model)627 void CreateModel_dynamic_output_shape_float16_3(Model *model) {
628 OperandType type15(Type::TENSOR_FLOAT16, {0});
629 OperandType type19(Type::TENSOR_FLOAT16, {4, 3, 2});
630 OperandType type3(Type::BOOL, {});
631 OperandType type7(Type::TENSOR_INT32, {4});
632 // Phase 1, operands
633 auto input02 = model->addOperand(&type19);
634 auto param4 = model->addOperand(&type7);
635 auto param5 = model->addOperand(&type3);
636 auto output02 = model->addOperand(&type15);
637 // Phase 2, operations
638 static int32_t param4_init[] = {1, 0, -3, -3};
639 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
640 static bool8 param5_init[] = {false};
641 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
642 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
643 // Phase 3, inputs and outputs
644 model->identifyInputsAndOutputs(
645 {input02},
646 {output02});
647 assert(model->isValid());
648 }
649
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
654
CreateModel_dynamic_output_shape_quant8_3(Model * model)655 void CreateModel_dynamic_output_shape_quant8_3(Model *model) {
656 OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 127);
657 OperandType type21(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
658 OperandType type3(Type::BOOL, {});
659 OperandType type7(Type::TENSOR_INT32, {4});
660 // Phase 1, operands
661 auto input02 = model->addOperand(&type21);
662 auto param4 = model->addOperand(&type7);
663 auto param5 = model->addOperand(&type3);
664 auto output02 = model->addOperand(&type16);
665 // Phase 2, operations
666 static int32_t param4_init[] = {1, 0, -3, -3};
667 model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
668 static bool8 param5_init[] = {false};
669 model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
670 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input02, param4, param5}, {output02});
671 // Phase 3, inputs and outputs
672 model->identifyInputsAndOutputs(
673 {input02},
674 {output02});
675 assert(model->isValid());
676 }
677
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_dynamic_output_shape_quant8_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
682
CreateModel_4(Model * model)683 void CreateModel_4(Model *model) {
684 OperandType type3(Type::BOOL, {});
685 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
686 OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
687 OperandType type9(Type::TENSOR_INT32, {2});
688 // Phase 1, operands
689 auto input03 = model->addOperand(&type5);
690 auto param6 = model->addOperand(&type9);
691 auto param7 = model->addOperand(&type3);
692 auto output03 = model->addOperand(&type8);
693 // Phase 2, operations
694 static int32_t param6_init[] = {0, 2};
695 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
696 static bool8 param7_init[] = {true};
697 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
698 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
699 // Phase 3, inputs and outputs
700 model->identifyInputsAndOutputs(
701 {input03},
702 {output03});
703 assert(model->isValid());
704 }
705
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
710
CreateModel_relaxed_4(Model * model)711 void CreateModel_relaxed_4(Model *model) {
712 OperandType type3(Type::BOOL, {});
713 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
714 OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
715 OperandType type9(Type::TENSOR_INT32, {2});
716 // Phase 1, operands
717 auto input03 = model->addOperand(&type5);
718 auto param6 = model->addOperand(&type9);
719 auto param7 = model->addOperand(&type3);
720 auto output03 = model->addOperand(&type8);
721 // Phase 2, operations
722 static int32_t param6_init[] = {0, 2};
723 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
724 static bool8 param7_init[] = {true};
725 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
726 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
727 // Phase 3, inputs and outputs
728 model->identifyInputsAndOutputs(
729 {input03},
730 {output03});
731 // Phase 4: set relaxed execution
732 model->relaxComputationFloat32toFloat16(true);
733 assert(model->isValid());
734 }
735
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_relaxed_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
740
CreateModel_float16_4(Model * model)741 void CreateModel_float16_4(Model *model) {
742 OperandType type19(Type::TENSOR_FLOAT16, {4, 3, 2});
743 OperandType type23(Type::TENSOR_FLOAT16, {1, 3, 1});
744 OperandType type3(Type::BOOL, {});
745 OperandType type9(Type::TENSOR_INT32, {2});
746 // Phase 1, operands
747 auto input03 = model->addOperand(&type19);
748 auto param6 = model->addOperand(&type9);
749 auto param7 = model->addOperand(&type3);
750 auto output03 = model->addOperand(&type23);
751 // Phase 2, operations
752 static int32_t param6_init[] = {0, 2};
753 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
754 static bool8 param7_init[] = {true};
755 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
756 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
757 // Phase 3, inputs and outputs
758 model->identifyInputsAndOutputs(
759 {input03},
760 {output03});
761 assert(model->isValid());
762 }
763
// Returns true if output index i should be ignored when comparing results.
inline bool is_ignored_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
768
CreateModel_quant8_4(Model * model)769 void CreateModel_quant8_4(Model *model) {
770 OperandType type21(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
771 OperandType type24(Type::TENSOR_QUANT8_ASYMM, {1, 3, 1}, 0.5f, 127);
772 OperandType type3(Type::BOOL, {});
773 OperandType type9(Type::TENSOR_INT32, {2});
774 // Phase 1, operands
775 auto input03 = model->addOperand(&type21);
776 auto param6 = model->addOperand(&type9);
777 auto param7 = model->addOperand(&type3);
778 auto output03 = model->addOperand(&type24);
779 // Phase 2, operations
780 static int32_t param6_init[] = {0, 2};
781 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
782 static bool8 param7_init[] = {true};
783 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
784 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
785 // Phase 3, inputs and outputs
786 model->identifyInputsAndOutputs(
787 {input03},
788 {output03});
789 assert(model->isValid());
790 }
791
// Returns true when output index i of this example should be ignored
// during result comparison. This example ignores no outputs.
inline bool is_ignored_quant8_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
796
CreateModel_dynamic_output_shape_4(Model * model)797 void CreateModel_dynamic_output_shape_4(Model *model) {
798 OperandType type25(Type::TENSOR_FLOAT32, {0, 0, 0});
799 OperandType type3(Type::BOOL, {});
800 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
801 OperandType type9(Type::TENSOR_INT32, {2});
802 // Phase 1, operands
803 auto input03 = model->addOperand(&type5);
804 auto param6 = model->addOperand(&type9);
805 auto param7 = model->addOperand(&type3);
806 auto output03 = model->addOperand(&type25);
807 // Phase 2, operations
808 static int32_t param6_init[] = {0, 2};
809 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
810 static bool8 param7_init[] = {true};
811 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
812 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
813 // Phase 3, inputs and outputs
814 model->identifyInputsAndOutputs(
815 {input03},
816 {output03});
817 assert(model->isValid());
818 }
819
// Returns true when output index i of this example should be ignored
// during result comparison. This example ignores no outputs.
inline bool is_ignored_dynamic_output_shape_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
824
CreateModel_dynamic_output_shape_relaxed_4(Model * model)825 void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
826 OperandType type25(Type::TENSOR_FLOAT32, {0, 0, 0});
827 OperandType type3(Type::BOOL, {});
828 OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
829 OperandType type9(Type::TENSOR_INT32, {2});
830 // Phase 1, operands
831 auto input03 = model->addOperand(&type5);
832 auto param6 = model->addOperand(&type9);
833 auto param7 = model->addOperand(&type3);
834 auto output03 = model->addOperand(&type25);
835 // Phase 2, operations
836 static int32_t param6_init[] = {0, 2};
837 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
838 static bool8 param7_init[] = {true};
839 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
840 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
841 // Phase 3, inputs and outputs
842 model->identifyInputsAndOutputs(
843 {input03},
844 {output03});
845 // Phase 4: set relaxed execution
846 model->relaxComputationFloat32toFloat16(true);
847 assert(model->isValid());
848 }
849
// Returns true when output index i of this example should be ignored
// during result comparison. This example ignores no outputs.
inline bool is_ignored_dynamic_output_shape_relaxed_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
854
CreateModel_dynamic_output_shape_float16_4(Model * model)855 void CreateModel_dynamic_output_shape_float16_4(Model *model) {
856 OperandType type19(Type::TENSOR_FLOAT16, {4, 3, 2});
857 OperandType type26(Type::TENSOR_FLOAT16, {0, 0, 0});
858 OperandType type3(Type::BOOL, {});
859 OperandType type9(Type::TENSOR_INT32, {2});
860 // Phase 1, operands
861 auto input03 = model->addOperand(&type19);
862 auto param6 = model->addOperand(&type9);
863 auto param7 = model->addOperand(&type3);
864 auto output03 = model->addOperand(&type26);
865 // Phase 2, operations
866 static int32_t param6_init[] = {0, 2};
867 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
868 static bool8 param7_init[] = {true};
869 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
870 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
871 // Phase 3, inputs and outputs
872 model->identifyInputsAndOutputs(
873 {input03},
874 {output03});
875 assert(model->isValid());
876 }
877
// Returns true when output index i of this example should be ignored
// during result comparison. This example ignores no outputs.
inline bool is_ignored_dynamic_output_shape_float16_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
882
CreateModel_dynamic_output_shape_quant8_4(Model * model)883 void CreateModel_dynamic_output_shape_quant8_4(Model *model) {
884 OperandType type21(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.5f, 127);
885 OperandType type27(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
886 OperandType type3(Type::BOOL, {});
887 OperandType type9(Type::TENSOR_INT32, {2});
888 // Phase 1, operands
889 auto input03 = model->addOperand(&type21);
890 auto param6 = model->addOperand(&type9);
891 auto param7 = model->addOperand(&type3);
892 auto output03 = model->addOperand(&type27);
893 // Phase 2, operations
894 static int32_t param6_init[] = {0, 2};
895 model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
896 static bool8 param7_init[] = {true};
897 model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
898 model->addOperation(ANEURALNETWORKS_REDUCE_MAX, {input03, param6, param7}, {output03});
899 // Phase 3, inputs and outputs
900 model->identifyInputsAndOutputs(
901 {input03},
902 {output03});
903 assert(model->isValid());
904 }
905
// Returns true when output index i of this example should be ignored
// during result comparison. This example ignores no outputs.
inline bool is_ignored_dynamic_output_shape_quant8_4(int i) {
  static const std::set<int> ignore;
  return ignore.count(i) != 0;
}
910
911