// clang-format off
// Generated file (from: tile_3.mod.py). Do not edit
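//
// Test models for the ANEURALNETWORKS_TILE operation: a base TENSOR_FLOAT32
// model plus TENSOR_FLOAT16, TENSOR_QUANT8_ASYMM and TENSOR_INT32 variants,
// each also provided in a dynamic-output-shape form. The *_relaxed variants
// additionally call relaxComputationFloat32toFloat16(true), which allows
// TENSOR_FLOAT32 values to be computed with the range and precision of FP16.
// The matching is_ignored_* helpers report which output indices should be
// skipped during result checking; here every ignore set is empty.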
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type2(Type::TENSOR_FLOAT32, {2, 6, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
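
// Note: for TILE, each output dimension is the corresponding input dimension
// multiplied by the matching entry of the multipliers tensor. The declared
// shapes here ({1, 2, 3} in, {2, 6, 3} out) are therefore consistent with
// per-axis multipliers of (2, 3, 1); the multiplier values themselves are
// supplied as input data at execution time rather than in this model definition.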

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type2(Type::TENSOR_FLOAT32, {2, 6, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 3});
  OperandType type4(Type::TENSOR_FLOAT16, {2, 6, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type4);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3}, 0.5f, 127);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {2, 6, 3}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type6);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_int32(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type7(Type::TENSOR_INT32, {1, 2, 3});
  OperandType type8(Type::TENSOR_INT32, {2, 6, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type8);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

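// The *_dynamic_output_shape variants below declare the output operand with
// all-zero dimensions ({0, 0, 0}). In NNAPI a dimension of 0 marks that
// extent as unspecified at model-build time, so these models exercise the
// path where the output shape has to be determined when the execution runs.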
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type9);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type9);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type3(Type::TENSOR_FLOAT16, {1, 2, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type10);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 0.5f, 127);
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 2, 3}, 0.5f, 127);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type11);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_int32(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {3});
  OperandType type12(Type::TENSOR_INT32, {0, 0, 0});
  OperandType type7(Type::TENSOR_INT32, {1, 2, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type7);
  auto multipliers = model->addOperand(&type1);
  auto output0 = model->addOperand(&type12);
  // Phase 2, operations
  model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0, multipliers},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
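
// ---------------------------------------------------------------------------
// Illustrative only (not part of the generated model definitions): a minimal
// sketch of how a test harness could drive the base CreateModel() above,
// assuming the NNAPI test wrapper types (Model, Compilation, Execution) are
// in scope. The buffer contents below are hypothetical example values; the
// real generated tests supply their data separately.
//
// void RunTileFloat32Sketch() {
//   Model model;
//   CreateModel(&model);
//   model.finish();
//
//   Compilation compilation(&model);
//   compilation.finish();
//
//   float input[1 * 2 * 3] = {1, 2, 3, 4, 5, 6};  // hypothetical input values
//   int32_t multipliers[3] = {2, 3, 1};           // consistent with the {2, 6, 3} output shape
//   float output[2 * 6 * 3] = {};
//
//   Execution execution(&compilation);
//   execution.setInput(0, input, sizeof(input));
//   execution.setInput(1, multipliers, sizeof(multipliers));
//   execution.setOutput(0, output, sizeof(output));
//   execution.compute();
// }
// ---------------------------------------------------------------------------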