Searched refs: o1 (Results 1 – 25 of 128), sorted by relevance

/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
resize_nearest_neighbor.mod.py
21 o1 = Output("out", "TENSOR_FLOAT32", "{1, 1, 1, 1}") # output 0
22 model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1, 1, layout).To(o1)
23 model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 0.5, 0.5, layout).To(o1)
28 o1: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
33 o1: [1]
36 Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16…
37 Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16…
42 o1 = Output("out", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output 0 variable
43 model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 3, 3, layout).To(o1)
44 model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1.5, 1.5, layout).To(o1)
[all …]
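
Note: the fragments above are from specs written in the NNAPI test-generator DSL. As a reading aid, here is a minimal sketch of how the resize_nearest_neighbor.mod.py hits might fit together; the input operand, its data, and the BoolScalar/DataTypeConverter helpers are inferred from the surrounding hits, not quoted from the file.

layout = BoolScalar("layout", False)  # NHWC vs. NCHW flag (assumed helper name)
i1 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}")    # assumed input operand and shape
o1 = Output("out", "TENSOR_FLOAT32", "{1, 1, 1, 1}")  # output 0 (hit at line 21)

# Two models computing the same output: one sized by target shape, one by scale factor.
model_shape = Model("shape").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 1, 1, layout).To(o1)
model_scale = Model("scale").Operation("RESIZE_NEAREST_NEIGHBOR", i1, 0.5, 0.5, layout).To(o1)

# Quantized variation: (type, scale, zero point) per operand, as in the hit at line 28.
quant8 = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    o1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
})

test1 = {
    i1: [1.0, 2.0, 3.0, 4.0],  # assumed reference input
    o1: [1.0],                 # expected output (hit at line 33)
}

# Each Example expands into NHWC/NCHW cases plus the relaxed, quant8 and float16 variations.
Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
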
resize_bilinear_v1_2.mod.py
21 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
22 model_shape = Model("shape").Operation("RESIZE_BILINEAR", i1, 3, 3, layout).To(o1)
23 model_scale = Model("scale").Operation("RESIZE_BILINEAR", i1, 1.5, 1.5, layout).To(o1)
28 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0)
33 o1: [1.0, 1.0, 1.0,
39 Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant…
40 Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant…
96 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
100 …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
114 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
[all …]
transpose_conv2d.mod.py
25 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output
26 Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)
33 o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
40 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
48 o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
55 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
60 o1: [-0.5, 0, 1.5, 2, 5.5, 8, 4.5, 6, 8.5, 10,
65 }).AddNchw(i1, o1, s1, layout).AddAllActivations(o1, act).AddVariations("relaxed", quant8, quant8_m…
183 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
187 …ed").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
[all …]
detection_postprocess.mod.py
22 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out
26 …STPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, True, 3, 1, 1, 0.0, 0.5, False).To(o1, o2, o3, o4)
56 o1: [0.95, 0.93, 0.0],
73 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out variable
77 …TPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0, 0.5, False).To(o1, o2, o3, o4)
107 o1: [0.95, 0.9, 0.3],
124 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out variable
128 …TPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0, 0.5, False).To(o1, o2, o3, o4)
158 o1: [0.95, 0.9, 0.3],
175 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out variable
[all …]
max_pool_v1_2.mod.py
21 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
22 Model().Operation("MAX_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
27 o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
33 o1: [1.0, 2.0, 3.0, 4.0]
34 }).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
116 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
120 …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
134 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
143 o1: [],
154 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
[all …]
avg_pool_v1_2.mod.py
21 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
22 Model().Operation("AVERAGE_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
27 o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
33 o1: [1.0, 2.0, 3.0, 4.0]
34 }).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
146 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
150 …ed").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
164 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
173 o1: [],
184 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
[all …]
depthwise_conv2d_dilation.mod.py
23 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
24 Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 2, 0, layout, 1, 1).To(o1)
31 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
39 o1: [11, 3, 7.2, 10.6,
43 }).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
78 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}") variable
79 Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 2, 1, 1, 2, 0, layout, 1, 1).To(o1)
86 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
94 o1: [11, 3, 7.2, 10.6,
98 }, name="valid_padding").AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
l2_pool_v1_2.mod.py
21 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
22 Model().Operation("L2_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
27 o1: [1.0, 2.0, 3.0, 4.0]
28 }).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16")
60 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
64 …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
77 o1: [],
88 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
92 …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
105 o1: [],
conv2d_dilation.mod.py
23 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
24 Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout, 1, 1).To(o1)
31 o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
37 o1: [.875, .875, .875, .875]
38 }).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
74 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}") variable
75 Model().Operation("CONV_2D", i1, f1, b1, 2, 1, 1, 0, layout, 1, 1).To(o1)
82 o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
88 o1: [.875, .875, .875, .875]
89 }, name="valid_padding").AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
conv2d_v1_2.mod.py
23 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
24 Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout).To(o1)
31 o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
37 o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
43 o1: [.875, .875, .875, .875]
44 }).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, channelQuant8, "float16")
219 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
223 …ed").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
239 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
250 o1: [],
[all …]
box_with_nms_limit_hard.mod.py
22 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{12}") # scores out
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o…
31 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
82 o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7],
109 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out variable
113 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 0, 0.4, 0.5, 0.3).To(o1, o2, o3…
118 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
169 o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.95, 0.8],
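
The BOX_WITH_NMS_LIMIT hits follow a multi-output pattern: a single Operation(...).To(...) call binds several outputs. A rough sketch of that shape is below, mirroring the scalar arguments from the hit at line 26 above; the input operands and the extra output names and shapes are not visible in these results and are illustrative assumptions only.

# Illustrative only: shapes and the o2/o3/o4 operands are assumed, since the
# .To(...) call is truncated in the hit above. The third scalar selects the NMS
# kernel (0, 1, 2), matching the _hard/_linear/_gaussian file names in this list.
i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}")     # assumed [numRois, numClasses]
i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}")       # assumed [numRois, numClasses * 4]
i3 = Input("batchSplit", "TENSOR_INT32", "{19}")      # assumed batch index per ROI

o1 = Output("scoresOut", "TENSOR_FLOAT32", "{12}")    # from the hit at line 22
o2 = Output("boxesOut", "TENSOR_FLOAT32", "{12, 4}")  # assumed
o3 = Output("classesOut", "TENSOR_INT32", "{12}")     # assumed
o4 = Output("batchesOut", "TENSOR_INT32", "{12}")     # assumed

model = Model().Operation("BOX_WITH_NMS_LIMIT",
                          i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o3, o4)
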
box_with_nms_limit_linear.mod.py
22 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{16}") # scores out
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 1, 0.4, 1.0, 0.3).To(o1, o2, o…
31 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
82 o1: [
116 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{15}") # scores out variable
120 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 8, 1, 0.4, 0.5, 0.3).To(o1, o2, o3…
125 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
176 o1: [
box_with_nms_limit_gaussian.mod.py
22 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{18}") # scores out
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 2, 0.4, 0.5, 0.3).To(o1, o2, o…
31 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
82 o1: [
118 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out variable
122 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 2, 0.4, 0.5, 0.3).To(o1, o2, o3…
127 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
178 o1: [
grouped_conv2d.mod.py
24 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # output 0
25 Model().Operation("GROUPED_CONV_2D", i1, w1, b1, 0, 0, 0, 0, 1, 1, 2, act, layout).To(o1)
32 o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
39 o1: ("TENSOR_QUANT8_ASYMM", 0.05, 80)
47 o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
54 o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
61 o1: [33, -0.5,
65 }).AddNchw(i1, o1, layout).AddAllActivations(o1, act).AddVariations("relaxed", quant8, quant8_mult_…
generate_proposals.mod.py
25 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{4}") # scores out
29 i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
36 o1: ("TENSOR_QUANT8_ASYMM", 0.01, 100),
56 o1: [0.95, 0.9, 0.85, 0.8], # scores out
74 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{30}") # scores out variable
78 i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
85 o1: ("TENSOR_QUANT8_ASYMM", 0.005, 0),
168 o1: [ # scores out
/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
resize_quant8_signed.mod.py
21 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
22 model_shape = Model("shape").Operation("RESIZE_BILINEAR", i1, 3, 3, layout).To(o1)
23 model_scale = Model("scale").Operation("RESIZE_BILINEAR", i1, 1.5, 1.5, layout).To(o1)
28 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128)
33 o1: [1.0, 1.0, 1.0,
39 Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefau…
40 Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefau…
96 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
100 …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
114 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
[all …]
box_with_nms_limit_quant8_signed.mod.py
22 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{18}") # scores out
26 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 2, 0.4, 0.5, 0.3).To(o1, o2, o…
31 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -128),
82 o1: [
119 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out variable
123 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 2, 0.4, 0.5, 0.3).To(o1, o2, o3…
128 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, 0),
179 o1: [
208 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{12}") # scores out variable
212 model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o…
[all …]
transpose_quant8_signed.mod.py
25 o1 = Output("op4", "TENSOR_FLOAT32", "{25, 32, 32, 16}") # output
26 Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 1, 32, 32, act, layout).To(o1)
33 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
41 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
46 o1: ([1] * 16 + [0] * (32 * 32 - 1) * 16) * 25
59 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output variable
60 Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)
67 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
74 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
82 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
[all …]
transpose_conv2d_quant8_signed.mod.py
25 o1 = Output("op4", "TENSOR_FLOAT32", "{25, 32, 32, 16}") # output
26 Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 1, 32, 32, act, layout).To(o1)
33 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
41 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
46 o1: ([1] * 16 + [0] * (32 * 32 - 1) * 16) * 25
59 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output variable
60 Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)
67 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
74 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
82 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
[all …]
conv2d_quant8_signed.mod.py
23 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
24 Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout, 1, 1).To(o1)
31 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
37 o1: [.875, .875, .875, .875]
38 }).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
74 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}") variable
75 Model().Operation("CONV_2D", i1, f1, b1, 2, 1, 1, 0, layout, 1, 1).To(o1)
82 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.125, -128)
88 o1: [.875, .875, .875, .875]
89 }, name="valid_padding").AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
[all …]
l2_normalization_quant8_signed.mod.py
18 o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # output 0
23 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0 / 128, 0)
35 o1: [0.00, 0.60, 0.80,
46 Model().Operation("L2_NORMALIZATION", i1, axis).To(o1)
47 Example(example0).AddAllDimsAndAxis(i1, o1, axis).AddVariations(quant8_signed, includeDefault=False)
52 o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # output 0 variable
57 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 1.0 / 128, 0)
69 o1: [0.00, 0.60, 0.80,
80 Model().Operation("L2_NORMALIZATION", i1).To(o1)
81 Example(example0).AddAllDims(i1, o1).AddVariations(quant8_signed, includeDefault=False)
depthwise_conv2d_quant8_signed.mod.py
23 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
24 Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 2, 0, layout, 1, 1).To(o1)
31 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
39 o1: [11, 3, 7.2, 10.6,
43 }).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
80 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}") variable
81 Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 2, 1, 1, 2, 0, layout, 1, 1).To(o1)
88 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -128)
96 o1: [11, 3, 7.2, 10.6,
100 }, name="valid_padding").AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
[all …]
grouped_conv2d_quant8_signed.mod.py
24 o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # output 0
25 Model().Operation("GROUPED_CONV_2D", i1, w1, b1, 0, 0, 0, 0, 1, 1, 2, act, layout).To(o1)
32 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
39 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.05, -48)
47 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -48)
54 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, -48)
61 o1: [33, -0.5,
65 }).AddNchw(i1, o1, layout).AddAllActivations(o1, act).AddVariations(quant8_signed, quant8_mult_gt_1…
generate_proposals_quant8_signed.mod.py
24 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{4}") # scores out
28 i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
35 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.01, -28),
55 o1: [0.95, 0.9, 0.85, 0.8], # scores out
74 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{30}") # scores out variable
78 i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
85 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.005, -128),
168 o1: [ # scores out
roi_align_quant8_signed.mod.py
22 o1 = Output("out", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
23 Model().Operation("ROI_ALIGN", i1, roi1, [0, 0, 0, 0], 2, 2, 2.0, 2.0, 4, 4, layout).To(o1)
28 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0625, 0)
45 o1: [
51 }).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
219 o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out variable
223 …d").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
233 o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
241 o1: [],
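
The V1_3 specs in this directory largely mirror their V1_2 float counterparts; the recurring difference is the variation they attach: a TENSOR_QUANT8_ASYMM_SIGNED converter passed to AddVariations with includeDefault=False. A minimal sketch of that pattern follows, using the scale and zero point from the roi_align hit at line 28; the input-side converter entry, the operands, and the example data are assumed.

# Sketch only: i1, o1, layout and the reference data are assumed to be defined
# earlier in the spec, as in the corresponding V1_2 file.
quant8_signed = DataTypeConverter().Identify({
    i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0625, 0),  # input entry assumed
    o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.0625, 0),  # scale / zero point from the hit at line 28
})

# includeDefault=False appears to suppress the unvaried float32 example, so only
# the signed-quant case is generated (assumption based on the flag name and its use above).
Example({
    i1: input_data,   # placeholder reference data
    o1: output_data,
}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)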
