// clang-format off
// Generated file (from: axis_aligned_bbox_transform.mod.py). Do not edit
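//
// This file defines the model builders for the ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM test
// in several variants: float32, relaxed float32-to-float16 computation, float16, quant8, and
// dynamic output shape, plus a second example set whose imageInfo tensor has shape {7, 2}.
// Each CreateModel_* builder is paired with an is_ignored_* helper that, in the generated test
// harness, marks output indices to skip during result comparison (empty for every variant here).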
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type3(Type::TENSOR_FLOAT32, {4, 2});
  // Phase 1, operands
  auto roi = model->addOperand(&type0);
  auto bboxDeltas = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type3);
  auto out = model->addOperand(&type1);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type3(Type::TENSOR_FLOAT32, {4, 2});
  // Phase 1, operands
  auto roi = model->addOperand(&type0);
  auto bboxDeltas = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type3);
  auto out = model->addOperand(&type1);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16(Model *model) {
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type5(Type::TENSOR_FLOAT16, {5, 8});
  OperandType type6(Type::TENSOR_FLOAT16, {4, 2});
  OperandType type7(Type::TENSOR_FLOAT16, {5, 4});
  // Phase 1, operands
  auto roi = model->addOperand(&type7);
  auto bboxDeltas = model->addOperand(&type5);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type6);
  auto out = model->addOperand(&type5);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

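// Quantized variant: the ROI, imageInfo, and output tensors use TENSOR_QUANT16_ASYMM with
// scale 0.125 and zeroPoint 0, while bboxDeltas uses TENSOR_QUANT8_ASYMM with scale 0.05 and
// zeroPoint 128.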
void CreateModel_quant8(Model *model) {
  OperandType type10(Type::TENSOR_QUANT16_ASYMM, {5, 8}, 0.125f, 0);
  OperandType type11(Type::TENSOR_QUANT16_ASYMM, {5, 4}, 0.125f, 0);
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {5, 8}, 0.05f, 128);
  OperandType type9(Type::TENSOR_QUANT16_ASYMM, {4, 2}, 0.125f, 0);
  // Phase 1, operands
  auto roi = model->addOperand(&type11);
  auto bboxDeltas = model->addOperand(&type8);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type9);
  auto out = model->addOperand(&type10);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  assert(model->isValid());
}

inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

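// The dynamic output shape variants declare the output operand with dimensions {0, 0}; a
// dimension of 0 is treated as unspecified, so the actual output shape is expected to be
// resolved at execution time.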
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type12(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type3(Type::TENSOR_FLOAT32, {4, 2});
  // Phase 1, operands
  auto roi = model->addOperand(&type0);
  auto bboxDeltas = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type3);
  auto out = model->addOperand(&type12);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type12(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type3(Type::TENSOR_FLOAT32, {4, 2});
  // Phase 1, operands
  auto roi = model->addOperand(&type0);
  auto bboxDeltas = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type3);
  auto out = model->addOperand(&type12);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type13(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type5(Type::TENSOR_FLOAT16, {5, 8});
  OperandType type6(Type::TENSOR_FLOAT16, {4, 2});
  OperandType type7(Type::TENSOR_FLOAT16, {5, 4});
  // Phase 1, operands
  auto roi = model->addOperand(&type7);
  auto bboxDeltas = model->addOperand(&type5);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type6);
  auto out = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type11(Type::TENSOR_QUANT16_ASYMM, {5, 4}, 0.125f, 0);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {0, 0}, 0.125f, 0);
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {5, 8}, 0.05f, 128);
  OperandType type9(Type::TENSOR_QUANT16_ASYMM, {4, 2}, 0.125f, 0);
  // Phase 1, operands
  auto roi = model->addOperand(&type11);
  auto bboxDeltas = model->addOperand(&type8);
  auto batchSplit = model->addOperand(&type2);
  auto imageInfo = model->addOperand(&type9);
  auto out = model->addOperand(&type14);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi, bboxDeltas, batchSplit, imageInfo}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi, bboxDeltas, batchSplit, imageInfo},
    {out});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

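// Second example set ("_2" variants): same operand layout as above, except that imageInfo has
// shape {7, 2} instead of {4, 2}.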
void CreateModel_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type4(Type::TENSOR_FLOAT32, {7, 2});
  // Phase 1, operands
  auto roi1 = model->addOperand(&type0);
  auto bboxDeltas1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type4);
  auto out1 = model->addOperand(&type1);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  assert(model->isValid());
}

inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type4(Type::TENSOR_FLOAT32, {7, 2});
  // Phase 1, operands
  auto roi1 = model->addOperand(&type0);
  auto bboxDeltas1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type4);
  auto out1 = model->addOperand(&type1);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_2(Model *model) {
  OperandType type15(Type::TENSOR_FLOAT16, {7, 2});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type5(Type::TENSOR_FLOAT16, {5, 8});
  OperandType type7(Type::TENSOR_FLOAT16, {5, 4});
  // Phase 1, operands
  auto roi1 = model->addOperand(&type7);
  auto bboxDeltas1 = model->addOperand(&type5);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type15);
  auto out1 = model->addOperand(&type5);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  assert(model->isValid());
}

inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8_2(Model *model) {
  OperandType type10(Type::TENSOR_QUANT16_ASYMM, {5, 8}, 0.125f, 0);
  OperandType type11(Type::TENSOR_QUANT16_ASYMM, {5, 4}, 0.125f, 0);
  OperandType type16(Type::TENSOR_QUANT16_ASYMM, {7, 2}, 0.125f, 0);
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {5, 8}, 0.05f, 128);
  // Phase 1, operands
  auto roi1 = model->addOperand(&type11);
  auto bboxDeltas1 = model->addOperand(&type8);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type16);
  auto out1 = model->addOperand(&type10);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  assert(model->isValid());
}

inline bool is_ignored_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type12(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type4(Type::TENSOR_FLOAT32, {7, 2});
  // Phase 1, operands
  auto roi1 = model->addOperand(&type0);
  auto bboxDeltas1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type4);
  auto out1 = model->addOperand(&type12);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {5, 4});
  OperandType type1(Type::TENSOR_FLOAT32, {5, 8});
  OperandType type12(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type4(Type::TENSOR_FLOAT32, {7, 2});
  // Phase 1, operands
  auto roi1 = model->addOperand(&type0);
  auto bboxDeltas1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type4);
  auto out1 = model->addOperand(&type12);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_2(Model *model) {
  OperandType type13(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type15(Type::TENSOR_FLOAT16, {7, 2});
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type5(Type::TENSOR_FLOAT16, {5, 8});
  OperandType type7(Type::TENSOR_FLOAT16, {5, 4});
  // Phase 1, operands
  auto roi1 = model->addOperand(&type7);
  auto bboxDeltas1 = model->addOperand(&type5);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type15);
  auto out1 = model->addOperand(&type13);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
  OperandType type11(Type::TENSOR_QUANT16_ASYMM, {5, 4}, 0.125f, 0);
  OperandType type14(Type::TENSOR_QUANT16_ASYMM, {0, 0}, 0.125f, 0);
  OperandType type16(Type::TENSOR_QUANT16_ASYMM, {7, 2}, 0.125f, 0);
  OperandType type2(Type::TENSOR_INT32, {5});
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {5, 8}, 0.05f, 128);
  // Phase 1, operands
  auto roi1 = model->addOperand(&type11);
  auto bboxDeltas1 = model->addOperand(&type8);
  auto batchSplit1 = model->addOperand(&type2);
  auto imageInfo1 = model->addOperand(&type16);
  auto out1 = model->addOperand(&type14);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM, {roi1, bboxDeltas1, batchSplit1, imageInfo1}, {out1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {roi1, bboxDeltas1, batchSplit1, imageInfo1},
    {out1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}