// clang-format off
// Generated file (from: concat_mixed_quant.mod.py). Do not edit
CreateModel_quant8(Model * model)3 void CreateModel_quant8(Model *model) {
4 OperandType type2(Type::INT32, {});
5 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.084f, 127);
6 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.05f, 0);
7 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.089f, 123);
8 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.029f, 0);
9 OperandType type7(Type::TENSOR_QUANT8_ASYMM, {2, 1, 8}, 0.1f, 127);
10 // Phase 1, operands
11 auto input0 = model->addOperand(&type3);
12 auto input1 = model->addOperand(&type4);
13 auto input2 = model->addOperand(&type5);
14 auto input3 = model->addOperand(&type6);
15 auto param = model->addOperand(&type2);
16 auto output0 = model->addOperand(&type7);
17 // Phase 2, operations
18 static int32_t param_init[] = {2};
19 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
20 model->addOperation(ANEURALNETWORKS_CONCATENATION, {input0, input1, input2, input3, param}, {output0});
21 // Phase 3, inputs and outputs
22 model->identifyInputsAndOutputs(
23 {input0, input1, input2, input3},
24 {output0});
25 assert(model->isValid());
26 }
27
// Returns true when output index `i` of the quant8 example should be
// skipped during result comparison. The ignore set is empty here, so this
// always returns false.
inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
32
CreateModel_dynamic_output_shape_quant8(Model * model)33 void CreateModel_dynamic_output_shape_quant8(Model *model) {
34 OperandType type2(Type::INT32, {});
35 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.084f, 127);
36 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.05f, 0);
37 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.089f, 123);
38 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.029f, 0);
39 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.1f, 127);
40 // Phase 1, operands
41 auto input0 = model->addOperand(&type3);
42 auto input1 = model->addOperand(&type4);
43 auto input2 = model->addOperand(&type5);
44 auto input3 = model->addOperand(&type6);
45 auto param = model->addOperand(&type2);
46 auto output0 = model->addOperand(&type8);
47 // Phase 2, operations
48 static int32_t param_init[] = {2};
49 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
50 model->addOperation(ANEURALNETWORKS_CONCATENATION, {input0, input1, input2, input3, param}, {output0});
51 // Phase 3, inputs and outputs
52 model->identifyInputsAndOutputs(
53 {input0, input1, input2, input3},
54 {output0});
55 assert(model->isValid());
56 }
57
// Returns true when output index `i` of the dynamic-output-shape quant8
// example should be skipped during result comparison. The ignore set is
// empty here, so this always returns false.
inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
62
CreateModel_quant8_2(Model * model)63 void CreateModel_quant8_2(Model *model) {
64 OperandType type2(Type::INT32, {});
65 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.084f, 127);
66 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.05f, 0);
67 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.089f, 123);
68 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.029f, 0);
69 OperandType type9(Type::TENSOR_QUANT8_ASYMM, {2, 1, 8}, 0.0078125f, 127);
70 // Phase 1, operands
71 auto input0 = model->addOperand(&type3);
72 auto input1 = model->addOperand(&type4);
73 auto input2 = model->addOperand(&type5);
74 auto input3 = model->addOperand(&type6);
75 auto param = model->addOperand(&type2);
76 auto output0 = model->addOperand(&type9);
77 // Phase 2, operations
78 static int32_t param_init[] = {2};
79 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
80 model->addOperation(ANEURALNETWORKS_CONCATENATION, {input0, input1, input2, input3, param}, {output0});
81 // Phase 3, inputs and outputs
82 model->identifyInputsAndOutputs(
83 {input0, input1, input2, input3},
84 {output0});
85 assert(model->isValid());
86 }
87
// Returns true when output index `i` of the quant8_2 example should be
// skipped during result comparison. The ignore set is empty here, so this
// always returns false.
inline bool is_ignored_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
92
CreateModel_dynamic_output_shape_quant8_2(Model * model)93 void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
94 OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.0078125f, 127);
95 OperandType type2(Type::INT32, {});
96 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.084f, 127);
97 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.05f, 0);
98 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.089f, 123);
99 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 1, 2}, 0.029f, 0);
100 // Phase 1, operands
101 auto input0 = model->addOperand(&type3);
102 auto input1 = model->addOperand(&type4);
103 auto input2 = model->addOperand(&type5);
104 auto input3 = model->addOperand(&type6);
105 auto param = model->addOperand(&type2);
106 auto output0 = model->addOperand(&type10);
107 // Phase 2, operations
108 static int32_t param_init[] = {2};
109 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
110 model->addOperation(ANEURALNETWORKS_CONCATENATION, {input0, input1, input2, input3, param}, {output0});
111 // Phase 3, inputs and outputs
112 model->identifyInputsAndOutputs(
113 {input0, input1, input2, input3},
114 {output0});
115 assert(model->isValid());
116 }
117
// Returns true when output index `i` of the dynamic-output-shape quant8_2
// example should be skipped during result comparison. The ignore set is
// empty here, so this always returns false.
inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
122
123