// clang-format off
// Generated file (from: lsh_projection_float16.mod.py). Do not edit
CreateModel(Model * model)3 void CreateModel(Model *model) {
4 OperandType type0(Type::TENSOR_FLOAT16, {4, 2});
5 OperandType type1(Type::TENSOR_INT32, {3, 2});
6 OperandType type2(Type::TENSOR_FLOAT16, {3});
7 OperandType type3(Type::INT32, {});
8 OperandType type4(Type::TENSOR_INT32, {8});
9 // Phase 1, operands
10 auto hash = model->addOperand(&type0);
11 auto lookup = model->addOperand(&type1);
12 auto weight = model->addOperand(&type2);
13 auto type_param = model->addOperand(&type3);
14 auto output = model->addOperand(&type4);
15 // Phase 2, operations
16 static _Float16 hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
17 model->setOperandValue(hash, hash_init, sizeof(_Float16) * 8);
18 static int32_t type_param_init[] = {2};
19 model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
20 model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
21 // Phase 3, inputs and outputs
22 model->identifyInputsAndOutputs(
23 {lookup, weight},
24 {output});
25 assert(model->isValid());
26 }
27
// Returns true iff output index i should be ignored when comparing test
// results for the base model; the generated ignore set is empty, so this
// always returns false.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
32
CreateModel_float16(Model * model)33 void CreateModel_float16(Model *model) {
34 OperandType type0(Type::TENSOR_FLOAT16, {4, 2});
35 OperandType type1(Type::TENSOR_INT32, {3, 2});
36 OperandType type2(Type::TENSOR_FLOAT16, {3});
37 OperandType type3(Type::INT32, {});
38 OperandType type4(Type::TENSOR_INT32, {8});
39 // Phase 1, operands
40 auto hash = model->addOperand(&type0);
41 auto lookup = model->addOperand(&type1);
42 auto weight = model->addOperand(&type2);
43 auto type_param = model->addOperand(&type3);
44 auto output = model->addOperand(&type4);
45 // Phase 2, operations
46 static _Float16 hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
47 model->setOperandValue(hash, hash_init, sizeof(_Float16) * 8);
48 static int32_t type_param_init[] = {2};
49 model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
50 model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
51 // Phase 3, inputs and outputs
52 model->identifyInputsAndOutputs(
53 {lookup, weight},
54 {output});
55 assert(model->isValid());
56 }
57
// Returns true iff output index i should be ignored when comparing test
// results for the float16 variant; the generated ignore set is empty, so
// this always returns false.
inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
62
CreateModel_dynamic_output_shape(Model * model)63 void CreateModel_dynamic_output_shape(Model *model) {
64 OperandType type0(Type::TENSOR_FLOAT16, {4, 2});
65 OperandType type1(Type::TENSOR_INT32, {3, 2});
66 OperandType type2(Type::TENSOR_FLOAT16, {3});
67 OperandType type3(Type::INT32, {});
68 OperandType type5(Type::TENSOR_INT32, {0});
69 // Phase 1, operands
70 auto hash = model->addOperand(&type0);
71 auto lookup = model->addOperand(&type1);
72 auto weight = model->addOperand(&type2);
73 auto type_param = model->addOperand(&type3);
74 auto output = model->addOperand(&type5);
75 // Phase 2, operations
76 static _Float16 hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
77 model->setOperandValue(hash, hash_init, sizeof(_Float16) * 8);
78 static int32_t type_param_init[] = {2};
79 model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
80 model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
81 // Phase 3, inputs and outputs
82 model->identifyInputsAndOutputs(
83 {lookup, weight},
84 {output});
85 assert(model->isValid());
86 }
87
// Returns true iff output index i should be ignored when comparing test
// results for the dynamic-output-shape variant; the generated ignore set
// is empty, so this always returns false.
inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
92
CreateModel_dynamic_output_shape_float16(Model * model)93 void CreateModel_dynamic_output_shape_float16(Model *model) {
94 OperandType type0(Type::TENSOR_FLOAT16, {4, 2});
95 OperandType type1(Type::TENSOR_INT32, {3, 2});
96 OperandType type2(Type::TENSOR_FLOAT16, {3});
97 OperandType type3(Type::INT32, {});
98 OperandType type5(Type::TENSOR_INT32, {0});
99 // Phase 1, operands
100 auto hash = model->addOperand(&type0);
101 auto lookup = model->addOperand(&type1);
102 auto weight = model->addOperand(&type2);
103 auto type_param = model->addOperand(&type3);
104 auto output = model->addOperand(&type5);
105 // Phase 2, operations
106 static _Float16 hash_init[] = {0.123f, 0.456f, -0.321f, -0.654f, 1.234f, 5.678f, -4.321f, -8.765f};
107 model->setOperandValue(hash, hash_init, sizeof(_Float16) * 8);
108 static int32_t type_param_init[] = {2};
109 model->setOperandValue(type_param, type_param_init, sizeof(int32_t) * 1);
110 model->addOperation(ANEURALNETWORKS_LSH_PROJECTION, {hash, lookup, weight, type_param}, {output});
111 // Phase 3, inputs and outputs
112 model->identifyInputsAndOutputs(
113 {lookup, weight},
114 {output});
115 assert(model->isValid());
116 }
117
// Returns true iff output index i should be ignored when comparing test
// results for the float16 + dynamic-output-shape variant; the generated
// ignore set is empty, so this always returns false.
inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
122
123