// clang-format off
// Generated file (from: tile_1.mod.py). Do not edit
CreateModel(Model * model)3 void CreateModel(Model *model) {
4   OperandType type0(Type::TENSOR_FLOAT32, {3});
5   OperandType type1(Type::TENSOR_INT32, {1});
6   OperandType type2(Type::TENSOR_FLOAT32, {6});
7   // Phase 1, operands
8   auto input0 = model->addOperand(&type0);
9   auto multipliers = model->addOperand(&type1);
10   auto output0 = model->addOperand(&type2);
11   // Phase 2, operations
12   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
13   // Phase 3, inputs and outputs
14   model->identifyInputsAndOutputs(
15     {input0, multipliers},
16     {output0});
17   assert(model->isValid());
18 }
19 
// Returns whether output index i is in the ignore set (outputs whose
// values are not compared). The set is empty for this test, so this
// always returns false.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relaxed(Model * model)25 void CreateModel_relaxed(Model *model) {
26   OperandType type0(Type::TENSOR_FLOAT32, {3});
27   OperandType type1(Type::TENSOR_INT32, {1});
28   OperandType type2(Type::TENSOR_FLOAT32, {6});
29   // Phase 1, operands
30   auto input0 = model->addOperand(&type0);
31   auto multipliers = model->addOperand(&type1);
32   auto output0 = model->addOperand(&type2);
33   // Phase 2, operations
34   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
35   // Phase 3, inputs and outputs
36   model->identifyInputsAndOutputs(
37     {input0, multipliers},
38     {output0});
39   // Phase 4: set relaxed execution
40   model->relaxComputationFloat32toFloat16(true);
41   assert(model->isValid());
42 }
43 
// Returns whether output index i is in the ignore set for the relaxed
// variant. The set is empty, so this always returns false.
inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_float16(Model * model)49 void CreateModel_float16(Model *model) {
50   OperandType type1(Type::TENSOR_INT32, {1});
51   OperandType type3(Type::TENSOR_FLOAT16, {3});
52   OperandType type4(Type::TENSOR_FLOAT16, {6});
53   // Phase 1, operands
54   auto input0 = model->addOperand(&type3);
55   auto multipliers = model->addOperand(&type1);
56   auto output0 = model->addOperand(&type4);
57   // Phase 2, operations
58   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
59   // Phase 3, inputs and outputs
60   model->identifyInputsAndOutputs(
61     {input0, multipliers},
62     {output0});
63   assert(model->isValid());
64 }
65 
// Returns whether output index i is in the ignore set for the float16
// variant. The set is empty, so this always returns false.
inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8(Model * model)71 void CreateModel_quant8(Model *model) {
72   OperandType type1(Type::TENSOR_INT32, {1});
73   OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
74   OperandType type6(Type::TENSOR_QUANT8_ASYMM, {6}, 0.5f, 127);
75   // Phase 1, operands
76   auto input0 = model->addOperand(&type5);
77   auto multipliers = model->addOperand(&type1);
78   auto output0 = model->addOperand(&type6);
79   // Phase 2, operations
80   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
81   // Phase 3, inputs and outputs
82   model->identifyInputsAndOutputs(
83     {input0, multipliers},
84     {output0});
85   assert(model->isValid());
86 }
87 
// Returns whether output index i is in the ignore set for the quant8
// variant. The set is empty, so this always returns false.
inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape(Model * model)93 void CreateModel_dynamic_output_shape(Model *model) {
94   OperandType type0(Type::TENSOR_FLOAT32, {3});
95   OperandType type1(Type::TENSOR_INT32, {1});
96   OperandType type7(Type::TENSOR_FLOAT32, {0});
97   // Phase 1, operands
98   auto input0 = model->addOperand(&type0);
99   auto multipliers = model->addOperand(&type1);
100   auto output0 = model->addOperand(&type7);
101   // Phase 2, operations
102   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
103   // Phase 3, inputs and outputs
104   model->identifyInputsAndOutputs(
105     {input0, multipliers},
106     {output0});
107   assert(model->isValid());
108 }
109 
// Returns whether output index i is in the ignore set for the
// dynamic-output-shape variant. The set is empty, so this always
// returns false.
inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relaxed(Model * model)115 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
116   OperandType type0(Type::TENSOR_FLOAT32, {3});
117   OperandType type1(Type::TENSOR_INT32, {1});
118   OperandType type7(Type::TENSOR_FLOAT32, {0});
119   // Phase 1, operands
120   auto input0 = model->addOperand(&type0);
121   auto multipliers = model->addOperand(&type1);
122   auto output0 = model->addOperand(&type7);
123   // Phase 2, operations
124   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
125   // Phase 3, inputs and outputs
126   model->identifyInputsAndOutputs(
127     {input0, multipliers},
128     {output0});
129   // Phase 4: set relaxed execution
130   model->relaxComputationFloat32toFloat16(true);
131   assert(model->isValid());
132 }
133 
// Returns whether output index i is in the ignore set for the
// dynamic-output-shape relaxed variant. The set is empty, so this
// always returns false.
inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_float16(Model * model)139 void CreateModel_dynamic_output_shape_float16(Model *model) {
140   OperandType type1(Type::TENSOR_INT32, {1});
141   OperandType type3(Type::TENSOR_FLOAT16, {3});
142   OperandType type8(Type::TENSOR_FLOAT16, {0});
143   // Phase 1, operands
144   auto input0 = model->addOperand(&type3);
145   auto multipliers = model->addOperand(&type1);
146   auto output0 = model->addOperand(&type8);
147   // Phase 2, operations
148   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
149   // Phase 3, inputs and outputs
150   model->identifyInputsAndOutputs(
151     {input0, multipliers},
152     {output0});
153   assert(model->isValid());
154 }
155 
// Returns whether output index i is in the ignore set for the
// dynamic-output-shape float16 variant. The set is empty, so this
// always returns false.
inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8(Model * model)161 void CreateModel_dynamic_output_shape_quant8(Model *model) {
162   OperandType type1(Type::TENSOR_INT32, {1});
163   OperandType type5(Type::TENSOR_QUANT8_ASYMM, {3}, 0.5f, 127);
164   OperandType type9(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 127);
165   // Phase 1, operands
166   auto input0 = model->addOperand(&type5);
167   auto multipliers = model->addOperand(&type1);
168   auto output0 = model->addOperand(&type9);
169   // Phase 2, operations
170   model->addOperation(ANEURALNETWORKS_TILE, {input0, multipliers}, {output0});
171   // Phase 3, inputs and outputs
172   model->identifyInputsAndOutputs(
173     {input0, multipliers},
174     {output0});
175   assert(model->isValid());
176 }
177 
// Returns whether output index i is in the ignore set for the
// dynamic-output-shape quant8 variant. The set is empty, so this
// always returns false.
inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}