Searched refs:i3 (Results 1 – 25 of 112) sorted by relevance

/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
box_with_nms_limit_quant8_signed.mod.py
20 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 2, 0.4, 0.5, 0.3).To(o1, o2, o…
78 i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
117 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
123 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 2, 0.4, 0.5, 0.3).To(o1, o2, o3…
175 i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
206 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
212 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o…
264 i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
294 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
[all …]
generate_proposals_quant8_signed.mod.py
22 i3 = Input("anchors", "TENSOR_FLOAT32", "{2, 4}") # anchors variable
28 i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
33 i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
50 i3: [0, 1, 4, 3, 1, 0, 3, 4], # anchors
72 i3 = Input("anchors", "TENSOR_FLOAT32", "{4, 4}") # anchors variable
78 i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
83 i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
158 i3: [ # anchors
max_pool_quant8_signed.mod.py
22 i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128") # output 0 variable
23 … model.Operation("MAX_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(i3)
27 output0 = {i3: # output 0
118 i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 2, 1}, 0.0625f, -128") # output 0 variable
119 model = model.Operation("MAX_POOL_2D", i1, pad_same, cons2, cons2, cons2, cons2, act_none).To(i3)
123 output0 = {i3: # output 0
191 i3 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn])) variable
193 Model().Operation("MAX_POOL_2D", i3, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o3)
197 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
203 i3: [x % std + 1 for x in range(bat * row * col * chn)],
[all …]
mul_quant8_signed.mod.py
21 i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 2.0, -128") variable
22 model = model.Operation("MUL", i1, i2, act).To(i3)
30 output0 = {i3: # output 0
42 i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 2.0, -128") variable
43 model = model.Operation("MUL", i1, i2, act).To(i3)
51 output0 = {i3: # output 0
add_quant8_signed.mod.py
21 i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2}, 1.0, 0") variable
22 model = model.Operation("ADD", i1, i2, act).To(i3)
30 output0 = {i3: # output 0
42 i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 2}, 1.0, 0") variable
43 model = model.Operation("ADD", i1, i2, act).To(i3)
51 output0 = {i3: # output 0
depthwise_conv2d_quant8_signed.mod.py
201 i3 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 2}, 0.5f, 0") variable
207 Model("layout").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 2, 0, layout).To(o3)
211 i3: [1, 2] * 9,
214 }).AddNchw(i3, o3, layout)
443 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}") variable
447 Model("large").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o3)
451 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -28),
457 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
465 i3: [10, 21, 10, 22, 10, 23, 10, 24],
467 }).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
grouped_conv2d_quant8_signed.mod.py
105 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 9}") # input 0 variable
109 Model("channel").Operation("GROUPED_CONV_2D", i3, w3, b3, 1, 1, 1, 3, 0, layout).To(o3)
113 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
120 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
127 i3: [1, 2, 3, 4, 55, 4, 3, 2, 1,
135 }).AddNchw(i3, o3, layout).AddVariations(quant8_signed, channelQuant8_signed, includeDefault=False)
roi_pooling_quant8_signed.mod.py
116 i3 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}") variable
119 Model().Operation("ROI_POOLING", i3, roi3, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, layout).To(o3)
122 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
129 i3: [
152 }).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
depth_to_space_quant8_signed.mod.py
107 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 8}") variable
109 Model().Operation("DEPTH_TO_SPACE", i3, 2, layout).To(o3)
113 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0),
119 i3: [10, 20, 11, 21, 14, 24, 15, 25,
127 }).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
space_to_depth_quant8_signed.mod.py
104 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") variable
106 Model().Operation("SPACE_TO_DEPTH", i3, 2, layout).To(o3)
110 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, -128),
116 i3: [10, 20, 11, 21, 12, 22, 13, 23,
124 }).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
conv2d_quant8_signed.mod.py
123 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 6, 6, 1}") variable
127 Model().Operation("CONV_2D", i3, f3, b3, 1, 2, 2, 0, layout, 3, 3).To(o3)
131 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
139 i3: [0, 0, 0, 0, 0, 0,
146 }).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
266 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 3}") variable
270 Model("channel").Operation("CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
274 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
280 i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
288 i3: [5., 5., 5.],
[all …]
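
The V1_3 quant8_signed matches above all follow the same test-generator pattern: i3 is declared as a float32 Input, the graph is built with Model().Operation(...).To(...), and an NCHW layout variation plus a TENSOR_QUANT8_ASYMM_SIGNED re-typing are attached to the Example. Below is a minimal sketch of that pattern using an illustrative DEPTH_TO_SPACE case; the shapes, scales, and data are made up for clarity, and it assumes the NNAPI test_generator helpers (Input, Output, BoolScalar, DataTypeConverter, Example) are in scope, as they are when a mod.py spec is run through the spec generator.

```python
# Minimal sketch of the quant8_signed spec pattern; shapes and data are illustrative.
layout = BoolScalar("layout", False)                  # NHWC by default; NCHW variation added below
i3 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 4}")   # input operand (the matched symbol)
o3 = Output("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}")  # output operand
Model().Operation("DEPTH_TO_SPACE", i3, 2, layout).To(o3)

# Re-type both operands to signed 8-bit asymmetric quantization: (type, scale, zeroPoint).
quant8_signed = DataTypeConverter().Identify({
    i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0),
    o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0, 0),
})

# Reference data, the NCHW layout variation, and the quant8_signed-only variation
# (includeDefault=False drops the float32 default, as in the specs above).
Example({
    i3: [1, 2, 3, 4],
    o3: [1, 2, 3, 4],
}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
```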
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
detection_postprocess.mod.py
20 i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors variable
26 Model("regular").Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, True, 3, 1…
45 i3: [ # six anchors in center-size encoding
71 i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors variable
77 Model().Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0…
96 i3: [ # six anchors in center-size encoding
122 i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors variable
128 Model().Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0…
147 i3: [ # six anchors in center-size encoding
173 i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors variable
[all …]
generate_proposals.mod.py
23 i3 = Input("anchors", "TENSOR_FLOAT32", "{2, 4}") # anchors variable
29 i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
34 i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
51 i3: [0, 1, 4, 3, 1, 0, 3, 4], # anchors
72 i3 = Input("anchors", "TENSOR_FLOAT32", "{4, 4}") # anchors variable
78 i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
83 i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
158 i3: [ # anchors
box_with_nms_limit_hard.mod.py
20 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o…
78 i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
107 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
113 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 0, 0.4, 0.5, 0.3).To(o1, o2, o3…
165 i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
box_with_nms_limit_linear.mod.py
20 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 1, 0.4, 1.0, 0.3).To(o1, o2, o…
78 i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
114 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
120 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 8, 1, 0.4, 0.5, 0.3).To(o1, o2, o3…
172 i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
box_with_nms_limit_gaussian.mod.py
20 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 2, 0.4, 0.5, 0.3).To(o1, o2, o…
78 i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
116 i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit variable
122 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 2, 0.4, 0.5, 0.3).To(o1, o2, o3…
174 i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
add_v1_2.mod.py
22 i3 = Output("op3", "TENSOR_FLOAT16", "{3}") variable
23 model = model.Operation("ADD", i1, i2, act).To(i3)
32 output0 = {i3: # output 0
44 i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}") variable
45 model = model.Operation("ADD", i1, i2, act).To(i3)
53 output0 = {i3: # output 0
mul_v1_2.mod.py
22 i3 = Output("op3", "TENSOR_FLOAT16", "{3}") variable
23 model = model.Operation("MUL", i1, i2, act).To(i3)
32 output0 = {i3: # output 0
44 i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}") variable
45 model = model.Operation("MUL", i1, i2, act).To(i3)
53 output0 = {i3: # output 0
div_v1_2.mod.py
22 i3 = Output("op3", "TENSOR_FLOAT16", "{3}") variable
23 model = model.Operation("DIV", i1, i2, act).To(i3)
32 output0 = {i3: # output 0
44 i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}") variable
45 model = model.Operation("DIV", i1, i2, act).To(i3)
53 output0 = {i3: # output 0
grouped_conv2d.mod.py
105 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 9}") # input 0 variable
109 Model("channel").Operation("GROUPED_CONV_2D", i3, w3, b3, 1, 1, 1, 3, 0, layout).To(o3)
113 i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
120 i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
127 i3: [1, 2, 3, 4, 55, 4, 3, 2, 1,
135 }).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, channelQuant8, "float16")
depth_to_space_v1_2.mod.py
56 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 8}") variable
58 Model().Operation("DEPTH_TO_SPACE", i3, 2, layout).To(o3)
62 i3: ("TENSOR_QUANT8_ASYMM", 1.0, 0),
68 i3: [10, 20, 11, 21, 14, 24, 15, 25,
76 }).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
space_to_depth_v1_2.mod.py
56 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") variable
58 Model().Operation("SPACE_TO_DEPTH", i3, 2, layout).To(o3)
62 i3: ("TENSOR_QUANT8_ASYMM", 1.0, 0),
68 i3: [10, 20, 11, 21, 12, 22, 13, 23,
76 }).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
roi_pooling.mod.py
116 i3 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}") variable
119 Model().Operation("ROI_POOLING", i3, roi3, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, layout).To(o3)
122 i3: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
129 i3: [
152 }).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, "float16")
space_to_batch_v1_2.mod.py
57 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 5, 2, 1}") variable
60 Model().Operation("SPACE_TO_BATCH_ND", i3, [3, 2], pad3, layout).To(o3)
64 i3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
70 i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
73 }).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
depthwise_conv2d_v1_2.mod.py
87 i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}") variable
91 Model("large").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o3)
95 i3: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
101 i3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
109 i3: [10, 21, 10, 22, 10, 23, 10, 24],
111 }).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8, channelQuant8)
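
In the V1_2 elementwise specs above (add_v1_2.mod.py, mul_v1_2.mod.py, div_v1_2.mod.py), the i3 matches are instead the Output operand of a simpler pattern: two inputs, a fused-activation scalar, and explicit input0/output0 example dicts. The following is a minimal sketch of that pattern with illustrative data, again assuming the test_generator helpers (Input, Output, Int32Scalar, Model, Example) are provided by the spec runner.

```python
# Minimal sketch of the elementwise spec pattern; values are illustrative.
model = Model()
i1 = Input("op1", "TENSOR_FLOAT16", "{3}")   # first addend
i2 = Input("op2", "TENSOR_FLOAT16", "{3}")   # second addend
act = Int32Scalar("act", 0)                  # fused activation: 0 = NONE
i3 = Output("op3", "TENSOR_FLOAT16", "{3}")  # result operand (the matched symbol)
model = model.Operation("ADD", i1, i2, act).To(i3)

# Reference example: element-wise sum with no activation applied.
input0 = {i1: [1.0, 2.0, 3.0],
          i2: [4.0, 5.0, 6.0]}
output0 = {i3: [5.0, 7.0, 9.0]}
Example((input0, output0))
```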
