// clang-format off
// Generated file (from: reduce_prod.mod.py). Do not edit
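// All models below exercise ANEURALNETWORKS_REDUCE_PROD. Summarizing what the
// generated code itself declares:
//   CreateModel*     - {3, 2} input reduced over axis -1, keep_dims = false -> {3} output
//   CreateModel_*_2  - {1} input reduced over axis 0, keep_dims = true -> {1} output
//   CreateModel_*_3  - {4, 3, 2} input reduced over axes {1, 0, -3, -3}, keep_dims = false -> {2} output
//   CreateModel_*_4  - {4, 3, 2} input reduced over axes {0, 2}, keep_dims = true -> {1, 3, 1} output
// Each base model also has relaxed, float16, and dynamic_output_shape variants.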
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
  OperandType type1(Type::TENSOR_FLOAT32, {3});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type2);
  auto param1 = model->addOperand(&type3);
  auto output0 = model->addOperand(&type1);
  // Phase 2, operations
  static int32_t param_init[] = {-1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static bool8 param1_init[] = {false};
  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
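
// Illustrative sketch only (not part of the generated test; the helper name is
// hypothetical): what REDUCE_PROD with axes = {-1} and keep_dims = false computes
// for the {3, 2} float32 input declared in CreateModel above.
inline void referenceReduceProdLastAxis(const float input[3][2], float output[3]) {
  for (int row = 0; row < 3; ++row) {
    float prod = 1.0f;
    for (int col = 0; col < 2; ++col) {
      prod *= input[row][col];  // multiply across the reduced (last) dimension
    }
    output[row] = prod;  // keep_dims = false, so the reduced axis is dropped from the shape
  }
}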

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
  OperandType type1(Type::TENSOR_FLOAT32, {3});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type2);
  auto param1 = model->addOperand(&type3);
  auto output0 = model->addOperand(&type1);
  // Phase 2, operations
  static int32_t param_init[] = {-1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static bool8 param1_init[] = {false};
  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
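
// Note on the _relaxed variants: relaxComputationFloat32toFloat16(true) allows
// drivers to compute TENSOR_FLOAT32 operands with float16 range and precision.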

void CreateModel_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
  OperandType type11(Type::TENSOR_FLOAT16, {3});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type10);
  auto param = model->addOperand(&type2);
  auto param1 = model->addOperand(&type3);
  auto output0 = model->addOperand(&type11);
  // Phase 2, operations
  static int32_t param_init[] = {-1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static bool8 param1_init[] = {false};
  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
  OperandType type12(Type::TENSOR_FLOAT32, {0});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type2);
  auto param1 = model->addOperand(&type3);
  auto output0 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param_init[] = {-1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static bool8 param1_init[] = {false};
  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
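
// Note on the _dynamic_output_shape variants: output dimensions of 0 mark the
// output shape as unspecified at model-build time; it is resolved at execution time.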

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3, 2});
  OperandType type12(Type::TENSOR_FLOAT32, {0});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto param = model->addOperand(&type2);
  auto param1 = model->addOperand(&type3);
  auto output0 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param_init[] = {-1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static bool8 param1_init[] = {false};
  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {3, 2});
  OperandType type13(Type::TENSOR_FLOAT16, {0});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type10);
  auto param = model->addOperand(&type2);
  auto param1 = model->addOperand(&type3);
  auto output0 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param_init[] = {-1};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static bool8 param1_init[] = {false};
  model->setOperandValue(param1, param1_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input0, param, param1}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_2(Model *model) {
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1});
  // Phase 1, operands
  auto input01 = model->addOperand(&type4);
  auto param2 = model->addOperand(&type2);
  auto param3 = model->addOperand(&type3);
  auto output01 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static bool8 param3_init[] = {true};
  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_2(Model *model) {
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1});
  // Phase 1, operands
  auto input01 = model->addOperand(&type4);
  auto param2 = model->addOperand(&type2);
  auto param3 = model->addOperand(&type3);
  auto output01 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static bool8 param3_init[] = {true};
  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_2(Model *model) {
  OperandType type14(Type::TENSOR_FLOAT16, {1});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input01 = model->addOperand(&type14);
  auto param2 = model->addOperand(&type2);
  auto param3 = model->addOperand(&type3);
  auto output01 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static bool8 param3_init[] = {true};
  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_2(Model *model) {
  OperandType type12(Type::TENSOR_FLOAT32, {0});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1});
  // Phase 1, operands
  auto input01 = model->addOperand(&type4);
  auto param2 = model->addOperand(&type2);
  auto param3 = model->addOperand(&type3);
  auto output01 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static bool8 param3_init[] = {true};
  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
  OperandType type12(Type::TENSOR_FLOAT32, {0});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  OperandType type4(Type::TENSOR_FLOAT32, {1});
  // Phase 1, operands
  auto input01 = model->addOperand(&type4);
  auto param2 = model->addOperand(&type2);
  auto param3 = model->addOperand(&type3);
  auto output01 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static bool8 param3_init[] = {true};
  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_2(Model *model) {
  OperandType type13(Type::TENSOR_FLOAT16, {0});
  OperandType type14(Type::TENSOR_FLOAT16, {1});
  OperandType type2(Type::TENSOR_INT32, {1});
  OperandType type3(Type::BOOL, {});
  // Phase 1, operands
  auto input01 = model->addOperand(&type14);
  auto param2 = model->addOperand(&type2);
  auto param3 = model->addOperand(&type3);
  auto output01 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static bool8 param3_init[] = {true};
  model->setOperandValue(param3, param3_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input01, param2, param3}, {output01});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input01},
    {output01});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_3(Model *model) {
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type6(Type::TENSOR_FLOAT32, {2});
  OperandType type7(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input02 = model->addOperand(&type5);
  auto param4 = model->addOperand(&type7);
  auto param5 = model->addOperand(&type3);
  auto output02 = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t param4_init[] = {1, 0, -3, -3};
  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
  static bool8 param5_init[] = {false};
  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
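
// Note on the _3 models above: with a rank-3 input, the axes {1, 0, -3, -3}
// normalize to {1, 0, 0, 0}; duplicates count once, so dimensions 0 and 1 are
// reduced and only the last dimension remains, matching the declared {2} output.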

void CreateModel_relaxed_3(Model *model) {
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type6(Type::TENSOR_FLOAT32, {2});
  OperandType type7(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input02 = model->addOperand(&type5);
  auto param4 = model->addOperand(&type7);
  auto param5 = model->addOperand(&type3);
  auto output02 = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t param4_init[] = {1, 0, -3, -3};
  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
  static bool8 param5_init[] = {false};
  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_3(Model *model) {
  OperandType type15(Type::TENSOR_FLOAT16, {4, 3, 2});
  OperandType type16(Type::TENSOR_FLOAT16, {2});
  OperandType type3(Type::BOOL, {});
  OperandType type7(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input02 = model->addOperand(&type15);
  auto param4 = model->addOperand(&type7);
  auto param5 = model->addOperand(&type3);
  auto output02 = model->addOperand(&type16);
  // Phase 2, operations
  static int32_t param4_init[] = {1, 0, -3, -3};
  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
  static bool8 param5_init[] = {false};
  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_3(Model *model) {
  OperandType type12(Type::TENSOR_FLOAT32, {0});
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type7(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input02 = model->addOperand(&type5);
  auto param4 = model->addOperand(&type7);
  auto param5 = model->addOperand(&type3);
  auto output02 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param4_init[] = {1, 0, -3, -3};
  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
  static bool8 param5_init[] = {false};
  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_3(Model *model) {
  OperandType type12(Type::TENSOR_FLOAT32, {0});
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type7(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input02 = model->addOperand(&type5);
  auto param4 = model->addOperand(&type7);
  auto param5 = model->addOperand(&type3);
  auto output02 = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t param4_init[] = {1, 0, -3, -3};
  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
  static bool8 param5_init[] = {false};
  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_3(Model *model) {
  OperandType type13(Type::TENSOR_FLOAT16, {0});
  OperandType type15(Type::TENSOR_FLOAT16, {4, 3, 2});
  OperandType type3(Type::BOOL, {});
  OperandType type7(Type::TENSOR_INT32, {4});
  // Phase 1, operands
  auto input02 = model->addOperand(&type15);
  auto param4 = model->addOperand(&type7);
  auto param5 = model->addOperand(&type3);
  auto output02 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param4_init[] = {1, 0, -3, -3};
  model->setOperandValue(param4, param4_init, sizeof(int32_t) * 4);
  static bool8 param5_init[] = {false};
  model->setOperandValue(param5, param5_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input02, param4, param5}, {output02});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input02},
    {output02});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_4(Model *model) {
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input03 = model->addOperand(&type5);
  auto param6 = model->addOperand(&type9);
  auto param7 = model->addOperand(&type3);
  auto output03 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param6_init[] = {0, 2};
  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
  static bool8 param7_init[] = {true};
  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
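
// Note on the _4 models above: axes {0, 2} are reduced with keep_dims = true,
// so the reduced dimensions stay in the output shape with size 1, giving
// {4, 3, 2} -> {1, 3, 1} as declared.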

void CreateModel_relaxed_4(Model *model) {
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type8(Type::TENSOR_FLOAT32, {1, 3, 1});
  OperandType type9(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input03 = model->addOperand(&type5);
  auto param6 = model->addOperand(&type9);
  auto param7 = model->addOperand(&type3);
  auto output03 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t param6_init[] = {0, 2};
  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
  static bool8 param7_init[] = {true};
  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_4(Model *model) {
  OperandType type15(Type::TENSOR_FLOAT16, {4, 3, 2});
  OperandType type17(Type::TENSOR_FLOAT16, {1, 3, 1});
  OperandType type3(Type::BOOL, {});
  OperandType type9(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input03 = model->addOperand(&type15);
  auto param6 = model->addOperand(&type9);
  auto param7 = model->addOperand(&type3);
  auto output03 = model->addOperand(&type17);
  // Phase 2, operations
  static int32_t param6_init[] = {0, 2};
  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
  static bool8 param7_init[] = {true};
  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_4(Model *model) {
  OperandType type18(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type9(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input03 = model->addOperand(&type5);
  auto param6 = model->addOperand(&type9);
  auto param7 = model->addOperand(&type3);
  auto output03 = model->addOperand(&type18);
  // Phase 2, operations
  static int32_t param6_init[] = {0, 2};
  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
  static bool8 param7_init[] = {true};
  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_4(Model *model) {
  OperandType type18(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type3(Type::BOOL, {});
  OperandType type5(Type::TENSOR_FLOAT32, {4, 3, 2});
  OperandType type9(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input03 = model->addOperand(&type5);
  auto param6 = model->addOperand(&type9);
  auto param7 = model->addOperand(&type3);
  auto output03 = model->addOperand(&type18);
  // Phase 2, operations
  static int32_t param6_init[] = {0, 2};
  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
  static bool8 param7_init[] = {true};
  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_4(Model *model) {
  OperandType type15(Type::TENSOR_FLOAT16, {4, 3, 2});
  OperandType type19(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type3(Type::BOOL, {});
  OperandType type9(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input03 = model->addOperand(&type15);
  auto param6 = model->addOperand(&type9);
  auto param7 = model->addOperand(&type3);
  auto output03 = model->addOperand(&type19);
  // Phase 2, operations
  static int32_t param6_init[] = {0, 2};
  model->setOperandValue(param6, param6_init, sizeof(int32_t) * 2);
  static bool8 param7_init[] = {true};
  model->setOperandValue(param7, param7_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_REDUCE_PROD, {input03, param6, param7}, {output03});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input03},
    {output03});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}