// clang-format off
// Generated file (from: argmin_3.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

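// The models above feed a {2, 2} tensor and axis = -1 to ANEURALNETWORKS_ARGMIN,
// which reduces along the last (innermost) dimension and produces one int32 index
// per row, hence the {2}-shaped output. A minimal reference sketch of that
// behaviour (hypothetical helper, not produced by the test generator), assuming a
// row-major rows x cols input:
inline void reference_argmin_last_axis(const float* data, int rows, int cols,
                                       int32_t* out) {
  for (int r = 0; r < rows; ++r) {
    int32_t best = 0;  // index of the smallest value seen so far in this row
    for (int c = 1; c < cols; ++c) {
      if (data[r * cols + c] < data[r * cols + best]) {
        best = c;
      }
    }
    out[r] = best;
  }
}
// For example, an input of {{1.0f, 2.0f}, {4.0f, 3.0f}} would yield {0, 1}.
// The _relaxed variant below builds the same graph but additionally calls
// relaxComputationFloat32toFloat16(true), permitting FP32 arithmetic to run at
// FP16 range and precision.
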
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_int32(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type4(Type::TENSOR_INT32, {2, 2});
  // Phase 1, operands
  auto input0 = model->addOperand(&type4);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

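// The quant8 variant below takes a TENSOR_QUANT8_ASYMM input with scale 1.0f and
// zero point 0, so each quantized byte maps directly to its real value
// (real = scale * (quantized - zeroPoint)); the argmin output indices remain int32.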
void CreateModel_quant8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {2});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

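// The remaining *_dynamic_output_shape variants repeat the same graphs but declare
// the output as TENSOR_INT32 with dimension {0}, i.e. an unspecified extent, leaving
// the actual output shape to be resolved by the driver at execution time.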
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type1(Type::INT32, {});
  OperandType type6(Type::TENSOR_INT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
  OperandType type1(Type::INT32, {});
  OperandType type6(Type::TENSOR_INT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT16, {2, 2});
  OperandType type6(Type::TENSOR_INT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_int32(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type4(Type::TENSOR_INT32, {2, 2});
  OperandType type6(Type::TENSOR_INT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type4);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
  OperandType type6(Type::TENSOR_INT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto axis = model->addOperand(&type1);
  auto output = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t axis_init[] = {-1};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_ARGMIN, {input0, axis}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}