// clang-format off
// Generated file (from: split_float_2.mod.py). Do not edit
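//
// Each CreateModel* variant below builds the same model: ANEURALNETWORKS_SPLIT
// splits a {2, 3} float tensor along axis 0 (num_splits = 2) into two {1, 3}
// outputs. The variants differ only in operand precision and output-shape
// handling, as noted before each function.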
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  auto output1 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {2};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

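// Same model, with FP32 -> FP16 relaxed execution enabled in Phase 4.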
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  auto output1 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {2};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

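// Same model, built with TENSOR_FLOAT16 operands instead of TENSOR_FLOAT32.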
void CreateModel_float16(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT16, {2, 3});
  OperandType type4(Type::TENSOR_FLOAT16, {1, 3});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type4);
  auto output1 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {2};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

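// Same model, with outputs declared as {0, 0} so their shapes must be
// resolved dynamically at execution time.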
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type1(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type5);
  auto output1 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {2};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

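// Dynamic output shapes combined with FP32 -> FP16 relaxed execution.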
void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type1(Type::INT32, {});
  OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type5);
  auto output1 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {2};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

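// Dynamic output shapes combined with TENSOR_FLOAT16 operands.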
void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type3(Type::TENSOR_FLOAT16, {2, 3});
  OperandType type6(Type::TENSOR_FLOAT16, {0, 0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto axis = model->addOperand(&type1);
  auto num_splits = model->addOperand(&type1);
  auto output0 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t axis_init[] = {0};
  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
  static int32_t num_splits_init[] = {2};
  model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_SPLIT, {input0, axis, num_splits}, {output0, output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0, output1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}