// clang-format off
// Generated file (from: pad_all_dims.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {4, 8, 8, 6});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

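// Illustrative note (not part of the generated file): ANEURALNETWORKS_PAD reads the
// {4, 2} paddings tensor row by row as {before_0, after_0, before_1, after_1, ...},
// so each output dimension is pad_before + input_dim + pad_after. The standalone
// sketch below, with the hypothetical helper name padded_shape, checks that paddings
// {1, 2, 3, 4, 3, 3, 2, 1} grow the {1, 1, 2, 3} input to the {4, 8, 8, 6} output
// shape declared as type2 above.
#include <array>
#include <cstdint>

static std::array<uint32_t, 4> padded_shape(const std::array<uint32_t, 4>& in,
                                            const std::array<int32_t, 8>& pads) {
  std::array<uint32_t, 4> out{};
  for (size_t d = 0; d < 4; ++d) {
    // pads[2*d] pads before dimension d, pads[2*d + 1] pads after it.
    out[d] = static_cast<uint32_t>(pads[2 * d]) + in[d] +
             static_cast<uint32_t>(pads[2 * d + 1]);
  }
  return out;
}
// Example: padded_shape({1, 1, 2, 3}, {1, 2, 3, 4, 3, 3, 2, 1}) == {4, 8, 8, 6}.
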
void CreateModel_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type3(Type::TENSOR_FLOAT16, {1, 1, 2, 3});
  OperandType type4(Type::TENSOR_FLOAT16, {4, 8, 8, 6});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {4, 8, 8, 6});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 1, 2, 3}, 2.3f, 0);
  OperandType type6(Type::TENSOR_QUANT8_ASYMM, {4, 8, 8, 6}, 2.3f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

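// Illustrative note (not part of the generated file): the quant8 variants build the
// same graph with TENSOR_QUANT8_ASYMM operands, whose stored bytes map to real values
// as real = scale * (q - zeroPoint); both type5 and type6 above use scale = 2.3f and
// zeroPoint = 0. A minimal standalone sketch of that mapping follows; the helper names
// are hypothetical and not part of the test harness.
#include <algorithm>
#include <cmath>
#include <cstdint>

static float dequantize_quant8(uint8_t q, float scale, int32_t zero_point) {
  // real = scale * (q - zeroPoint)
  return scale * static_cast<float>(static_cast<int32_t>(q) - zero_point);
}

static uint8_t quantize_quant8(float real, float scale, int32_t zero_point) {
  // q = round(real / scale) + zeroPoint, clamped to the uint8 range.
  int32_t q = static_cast<int32_t>(std::round(real / scale)) + zero_point;
  return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}
// Example: quantize_quant8(2.3f, 2.3f, 0) == 1 and dequantize_quant8(1, 2.3f, 0) == 2.3f.
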
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type7);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type3(Type::TENSOR_FLOAT16, {1, 1, 2, 3});
  OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type7);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {4, 2});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {1, 1, 2, 3}, 2.3f, 0);
  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 2.3f, 0);
  // Phase 1, operands
  auto input0 = model->addOperand(&type5);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type9);
  // Phase 2, operations
  static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

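// Illustrative usage sketch (not part of the generated file): assuming the NNAPI C++
// wrapper classes Compilation, Execution, and Result that are declared alongside Model
// in NeuralNetworksWrapper.h, a driver could build and run the float32 variant roughly
// as below. The RunPadAllDimsExample name and the buffer contents are hypothetical.
static bool RunPadAllDimsExample() {
  Model model;
  CreateModel(&model);
  model.finish();

  Compilation compilation(&model);
  if (compilation.finish() != Result::NO_ERROR) return false;

  Execution execution(&compilation);
  // Input is {1, 1, 2, 3} = 6 floats; output is {4, 8, 8, 6} = 1536 floats.
  float input[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
  static float output[4 * 8 * 8 * 6] = {};
  execution.setInput(0, input, sizeof(input));
  execution.setOutput(0, output, sizeof(output));
  return execution.compute() == Result::NO_ERROR;
}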