// Generated file (from: depthwise_conv2d_quant8_2.mod.py). Do not edit
void CreateModel(Model *model) {
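  // Operand types. For TENSOR_QUANT8_ASYMM, real_value = scale * (quantized_value - zeroPoint);
  // the TENSOR_INT32 bias uses scale = input_scale * filter_scale (0.5f * 0.5f = 0.25f) and zeroPoint 0.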
  OperandType type3(Type::INT32, {});
  OperandType type2(Type::TENSOR_INT32, {4}, 0.25f, 0);
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.f, 127);
  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 127);
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
  // Phase 1, operands
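  // op1: NHWC input {1, 3, 2, 2}, op2: filter {1, 2, 2, 4}, op3: bias {4} (one per output channel),
  // op4: output {1, 2, 1, 4}; the scalar INT32 operands configure padding, activation, stride,
  // and the depth (channel) multiplier.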
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad_valid = model->addOperand(&type3);
  auto act_none = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto channelMultiplier = model->addOperand(&type3);
  auto op4 = model->addOperand(&type4);
  // Phase 2, operations
  static uint8_t op2_init[] = {129, 131, 133, 135, 109, 147, 105, 151, 137, 139, 141, 143, 153, 99, 157, 95};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
  static int32_t op3_init[] = {4, 8, 12, 16};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 4);
  static int32_t pad_valid_init[] = {2};
  model->setOperandValue(pad_valid, pad_valid_init, sizeof(int32_t) * 1);
  static int32_t act_none_init[] = {0};
  model->setOperandValue(act_none, act_none_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {2};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
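  // DEPTHWISE_CONV_2D with implicit padding takes: input, filter, bias, padding scheme,
  // stride width, stride height, depth multiplier, fused activation.
  // Here 2 == ANEURALNETWORKS_PADDING_VALID and 0 == ANEURALNETWORKS_FUSED_NONE.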
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad_valid, stride, stride, channelMultiplier, act_none}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

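// is_ignored() returns true when output index i is in the ignore set; it is consulted by the
// generated test harness to skip comparing such outputs. The set is empty here, so every
// output is checked against the expected values.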
bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}