
Searched refs:weights (Results 1 – 25 of 2023) sorted by relevance


/external/tensorflow/tensorflow/python/ops/
bincount_ops.py
37 weights=None, argument
137 if weights is not None:
138 weights = ops.convert_to_tensor(weights, name="weights")
139 return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)
140 weights = constant_op.constant([], dtype)
141 return gen_math_ops.bincount(arr, output_size, weights)
145 if weights is not None:
146 if not isinstance(weights, sparse_tensor.SparseTensor):
147 weights = ragged_tensor.convert_to_tensor_or_ragged_tensor(
148 weights, name="weights")
[all …]
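
The bincount_ops.py match falls back to gen_math_ops.bincount with an empty weights constant when no weights are given, and to unsorted_segment_sum when they are. As a rough illustration of the weighted-versus-unweighted behaviour of the public API (values and weights below are made up for illustration):

    import numpy as np
    import tensorflow as tf

    values = [1, 1, 2, 3, 3, 3]
    weights = [0.5, 0.5, 1.0, 0.25, 0.25, 0.5]

    # Unweighted: each occurrence counts as 1.
    print(tf.math.bincount(values).numpy())                   # [0 2 1 3]
    # Weighted: each occurrence contributes its weight instead of 1.
    print(tf.math.bincount(values, weights=weights).numpy())  # [0. 1. 1. 1.]
    # Same idea in NumPy, as used by the test file further down.
    print(np.bincount(values, weights=weights))
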
metrics_impl.py
88 def _remove_squeezable_dimensions(predictions, labels, weights): argument
117 if weights is None:
120 weights = ops.convert_to_tensor(weights)
121 weights_shape = weights.get_shape()
124 return predictions, labels, weights
131 weights = array_ops.squeeze(weights, [-1])
133 weights = array_ops.expand_dims(weights, [-1])
136 weights_rank_tensor = array_ops.rank(weights)
142 lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)
147 maybe_squeeze_weights = lambda: weights
[all …]
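
The metrics_impl.py match squeezes or expands a trailing size-1 dimension so that weights line up with predictions and labels. A minimal, hypothetical sketch of that idea using only public ops (the helper name and shapes here are illustrative, not the library's):

    import tensorflow as tf

    def align_weights(weights, target_rank):
        # Drop or add a trailing size-1 axis so weights match the target rank,
        # mirroring the squeeze/expand_dims pattern in the snippet above.
        if weights.shape.rank == target_rank + 1 and weights.shape[-1] == 1:
            return tf.squeeze(weights, axis=[-1])
        if weights.shape.rank == target_rank - 1:
            return tf.expand_dims(weights, axis=-1)
        return weights

    w = tf.constant([[0.5], [1.0], [0.0]])        # shape (3, 1)
    print(align_weights(w, target_rank=1).shape)  # (3,)
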
bincount_ops_test.py
157 weights=None, argument
161 weights=weights,
353 weights=None, argument
356 w_sparse = sparse_ops.from_dense(weights) if weights is not None else None
359 weights=w_sparse,
500 weights=None, argument
503 w = ragged_factory_ops.constant(weights) if weights is not None else None
506 weights=w,
557 np_out = np.bincount(inp_vals, minlength=size, weights=weight_vals)
686 weights = ragged_factory_ops.constant([[], [], [.1, .2, .3], [],
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/
weights_broadcast_test.py
41 def _test_valid(self, weights, values): argument
43 weights=weights, values=values)
47 weights=weights_placeholder, values=values_placeholder)
51 weights_placeholder: weights,
57 self._test_valid(weights=5, values=_test_values((3, 2, 4)))
62 weights=np.asarray((5,)).reshape((1, 1, 1)),
68 weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
74 weights=np.asarray((5, 11)).reshape((1, 2, 1)),
80 weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
86 weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
[all …]
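
The weights_broadcast test above exercises weights shapes that broadcast against values of shape (3, 2, 4): a scalar, (1, 1, 1), (1, 1, 4), (1, 2, 1), (1, 2, 4), (3, 1, 1), and so on. The same broadcasting rule can be checked with plain NumPy; this is only a sketch of the rule, not the TensorFlow op:

    import numpy as np

    values = np.zeros((3, 2, 4))
    for w_shape in [(), (1, 1, 1), (1, 1, 4), (1, 2, 1), (1, 2, 4), (3, 1, 1)]:
        w = np.ones(w_shape)
        # np.broadcast_shapes raises ValueError if the shapes are incompatible.
        print(w_shape, "->", np.broadcast_shapes(w.shape, values.shape))
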
losses_test.py
53 self._predictions, self._predictions, weights=None)
66 weights = 2.3
67 loss = losses.absolute_difference(self._labels, self._predictions, weights)
69 self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
72 weights = 2.3
74 constant_op.constant(weights))
76 self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
79 weights = constant_op.constant((1.2, 0.0), shape=(2, 1))
80 loss = losses.absolute_difference(self._labels, self._predictions, weights)
85 weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
[all …]
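
losses_test.py checks that absolute_difference scales linearly with a scalar weight (the expected value is 5.5 times the weight). A minimal sketch of that scaling with the v1 losses API, using made-up labels and predictions:

    import tensorflow as tf

    labels = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    predictions = tf.constant([[0.0, 0.0], [0.0, 0.0]])

    unweighted = tf.compat.v1.losses.absolute_difference(labels, predictions)
    weighted = tf.compat.v1.losses.absolute_difference(
        labels, predictions, weights=2.3)
    # With a scalar weight, the loss is simply scaled by that weight.
    print(float(unweighted), float(weighted))  # 2.5 and 2.5 * 2.3
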
/external/tensorflow/tensorflow/python/keras/saving/
hdf5_format.py
89 if len(model.weights) != len(model._undeduplicated_weights):
232 weights,
250 def convert_nested_bidirectional(weights): argument
262 num_weights_per_layer = len(weights) // 2
264 layer.forward_layer, weights[:num_weights_per_layer],
267 layer.backward_layer, weights[num_weights_per_layer:],
271 def convert_nested_time_distributed(weights): argument
284 layer.layer, weights, original_keras_version, original_backend)
286 def convert_nested_model(weights): argument
298 trainable_weights = weights[:len(layer.trainable_weights)]
[all …]
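
hdf5_format.py is the code path behind Keras HDF5 weight saving, including the conversion helpers for nested bidirectional and time-distributed layers shown above. A rough usage sketch of the public entry points that eventually reach this module (assumes h5py is installed; the file name is arbitrary):

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(4, input_shape=(8,)),
        tf.keras.layers.Dense(1),
    ])

    # Saving with an .h5 suffix routes through the HDF5 weight format.
    model.save_weights("weights.h5")
    model.load_weights("weights.h5")
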
/external/tensorflow/tensorflow/lite/delegates/gpu/common/task/
weights_conversion.h
37 const tflite::gpu::Tensor<OHWI, S>& weights, int out_group_size, in RearrangeWeightsToOHWIOGroupI4O4() argument
39 const int dst_slices = DivideRoundUp(weights.shape.o, 4); in RearrangeWeightsToOHWIOGroupI4O4()
40 const int src_slices = DivideRoundUp(weights.shape.i, 4); in RearrangeWeightsToOHWIOGroupI4O4()
45 for (int y = 0; y < weights.shape.h; ++y) { in RearrangeWeightsToOHWIOGroupI4O4()
46 for (int x = 0; x < weights.shape.w; ++x) { in RearrangeWeightsToOHWIOGroupI4O4()
54 if (s_ch < weights.shape.i && d_ch < weights.shape.o) { in RearrangeWeightsToOHWIOGroupI4O4()
56 weights.shape.LinearIndex({d_ch, y, x, s_ch}); in RearrangeWeightsToOHWIOGroupI4O4()
57 filter[i] = weights.data[f_index]; in RearrangeWeightsToOHWIOGroupI4O4()
73 const tflite::gpu::Tensor<OHWI, S>& weights, int out_group_size, in RearrangeWeightsToOHWIOGroupO4I4() argument
75 const int dst_slices = DivideRoundUp(weights.shape.o, 4); in RearrangeWeightsToOHWIOGroupO4I4()
[all …]
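
These conversion helpers split the output and input channel dimensions into groups of four (dst_slices and src_slices) via DivideRoundUp. The grouping arithmetic is just ceiling division; a sketch of that part in Python (the layout details of the full rearrangement are omitted):

    def divide_round_up(n, d):
        # Ceiling division, matching DivideRoundUp in the snippet above.
        return (n + d - 1) // d

    o, i = 10, 7                        # illustrative output/input channel counts
    dst_slices = divide_round_up(o, 4)  # 3 groups of 4 output channels
    src_slices = divide_round_up(i, 4)  # 2 groups of 4 input channels
    print(dst_slices, src_slices)
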
/external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/
conv_weights_converter_test_util.cc
30 const Tensor<OHWI, DataType::FLOAT32>& weights, in ConvolutionWeightsConverterTest() argument
36 BHWC(weights.shape.o, weights.shape.h, weights.shape.w, weights.shape.i); in ConvolutionWeightsConverterTest()
39 for (int o = 0; o < weights.shape.o; ++o) { in ConvolutionWeightsConverterTest()
40 for (int y = 0; y < weights.shape.h; ++y) { in ConvolutionWeightsConverterTest()
41 for (int x = 0; x < weights.shape.w; ++x) { in ConvolutionWeightsConverterTest()
42 for (int i = 0; i < weights.shape.i; ++i) { in ConvolutionWeightsConverterTest()
43 const int f_index = weights.shape.LinearIndex({o, y, x, i}); in ConvolutionWeightsConverterTest()
45 src_tensor.data[s_index] = weights.data[f_index]; in ConvolutionWeightsConverterTest()
52 GetTotalElementsCountForLayout(weight_desc, weights.shape); in ConvolutionWeightsConverterTest()
56 RearrangeWeights(weights, weight_desc, weights_type, in ConvolutionWeightsConverterTest()
[all …]
conv_constants.h
35 const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { in RearrangeWeightsForConvConstants() argument
36 const int dst_depth = DivideRoundUp(weights.shape.o, 4); in RearrangeWeightsForConvConstants()
37 const int src_depth = DivideRoundUp(weights.shape.i, 4); in RearrangeWeightsForConvConstants()
38 const int kernel_x = weights.shape.w; in RearrangeWeightsForConvConstants()
39 const int kernel_y = weights.shape.h; in RearrangeWeightsForConvConstants()
46 const int channels_count = std::min(4, weights.shape.i - s * 4); in RearrangeWeightsForConvConstants()
52 if (s_ch < weights.shape.i && d_ch < weights.shape.o) { in RearrangeWeightsForConvConstants()
54 weights.shape.LinearIndex({d_ch, y, x, s_ch}); in RearrangeWeightsForConvConstants()
55 filters[j][i] = weights.data[f_index]; in RearrangeWeightsForConvConstants()
72 const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { in RearrangeWeightsForConvConstantsDot() argument
[all …]
depthwise_conv.h
37 void RearrangeWeightsForDWConv2D(const tflite::gpu::Tensor<OHWI, S>& weights, in RearrangeWeightsForDWConv2D() argument
39 const int dst_channels = weights.shape.i * weights.shape.o; in RearrangeWeightsForDWConv2D()
41 const int kernel_x = weights.shape.w; in RearrangeWeightsForDWConv2D()
42 const int kernel_y = weights.shape.h; in RearrangeWeightsForDWConv2D()
52 const int f_index = weights.shape.LinearIndex( in RearrangeWeightsForDWConv2D()
53 {d_ch % weights.shape.o, y, x, d_ch / weights.shape.o}); in RearrangeWeightsForDWConv2D()
54 filter_val[i] = weights.data[f_index]; in RearrangeWeightsForDWConv2D()
66 void UploadWeightsForDWConv2D(const tflite::gpu::Tensor<OHWI, T>& weights, in UploadWeightsForDWConv2D() argument
70 const int dst_channels = weights.shape.i * weights.shape.o; in UploadWeightsForDWConv2D()
72 const int kernel_x = weights.shape.w; in UploadWeightsForDWConv2D()
[all …]
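
In the depthwise case above, the destination channel count is i * o, and destination channel d maps back to output index d % o and input index d / o when computing the OHWI linear index. A NumPy sketch of just that index mapping (the grouping of spatial taps into 4-wide filter values is left out):

    import numpy as np

    weights = np.random.rand(3, 2, 2, 4).astype(np.float32)  # OHWI: o=3, h=2, w=2, i=4
    o, h, w, i = weights.shape
    dst_channels = i * o

    # Destination channel d reads weights[d % o, y, x, d // o], as in
    # RearrangeWeightsForDWConv2D above.
    flat = np.stack([weights[d % o, :, :, d // o] for d in range(dst_channels)])
    print(flat.shape)  # (12, 2, 2)
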
convolution_transposed_thin.h
55 void UploadData(const tflite::gpu::Tensor<OHWI, T>& weights,
59 void RearrangeWeightsData(const tflite::gpu::Tensor<OHWI, S>& weights,
68 const tflite::gpu::Tensor<OHWI, T>& weights, in UploadData() argument
70 const int src_depth = DivideRoundUp(weights.shape.i, 4); in UploadData()
72 weights.shape.w * weights.shape.h * src_depth * weights.shape.o; in UploadData()
86 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data, flt4_count)); in UploadData()
88 for (int i = 0; i < weights.shape.o; ++i) { in UploadData()
94 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data, flt4_count)); in UploadData()
96 for (int i = 0; i < weights.shape.o; ++i) { in UploadData()
108 const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { in RearrangeWeightsData() argument
[all …]
/external/tensorflow/tensorflow/python/ops/losses/
losses_impl.py
91 def _num_present(losses, weights, per_batch=False): argument
113 if ((isinstance(weights, float) and weights != 0.0) or
114 (context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access
115 and not math_ops.equal(weights, 0.0))):
117 with ops.name_scope(None, "num_present", (losses, weights)) as scope:
118 weights = math_ops.cast(weights, dtype=dtypes.float32)
120 math_ops.equal(weights, 0.0),
121 array_ops.zeros_like(weights),
122 array_ops.ones_like(weights))
142 losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, argument
[all …]
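
_num_present in losses_impl.py counts how many loss elements carry a non-zero weight by mapping zero weights to 0 and everything else to 1 before reducing. A minimal sketch of that masking step with public ops (the reduction over the broadcast loss shape is left out):

    import tensorflow as tf

    weights = tf.constant([0.0, 1.2, 0.0, 3.0])
    present = tf.where(tf.equal(weights, 0.0),
                       tf.zeros_like(weights),
                       tf.ones_like(weights))
    print(present.numpy())                 # [0. 1. 0. 1.]
    print(tf.reduce_sum(present).numpy())  # 2.0 elements with non-zero weight
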
/external/tensorflow/tensorflow/lite/delegates/gpu/common/transformations/
fuse_mul_to_conv.cc
180 for (int d = 0; d < attr->weights.shape.o; ++d) { in FuseConvolution2DWithMultiply()
182 for (int s = 0; s < attr->weights.shape.i; ++s) { in FuseConvolution2DWithMultiply()
183 for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { in FuseConvolution2DWithMultiply()
184 for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { in FuseConvolution2DWithMultiply()
185 const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); in FuseConvolution2DWithMultiply()
186 attr->weights.data[index] *= multiplier; in FuseConvolution2DWithMultiply()
201 for (int g = 0; g < attr->weights.shape.o; ++g) { in FuseDepthwiseConvolution2DWithMultiply()
202 for (int s = 0; s < attr->weights.shape.i; ++s) { in FuseDepthwiseConvolution2DWithMultiply()
203 const int d = s * attr->weights.shape.o + g; in FuseDepthwiseConvolution2DWithMultiply()
205 for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { in FuseDepthwiseConvolution2DWithMultiply()
[all …]
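
FuseConvolution2DWithMultiply folds a per-output-channel multiplier into the OHWI convolution weights by scaling every element of output channel d by multiplier[d]. With the weights held as an array of shape (O, H, W, I), the same fold is a one-line broadcast; this NumPy sketch mirrors the nested loop above:

    import numpy as np

    weights = np.arange(2 * 3 * 3 * 4, dtype=np.float32).reshape(2, 3, 3, 4)  # OHWI
    multiplier = np.array([0.5, 2.0], dtype=np.float32)  # one per output channel

    # Equivalent to the d/s/k_y/k_x loop in FuseConvolution2DWithMultiply.
    fused = weights * multiplier[:, None, None, None]
    print(fused.shape)  # (2, 3, 3, 4)
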
/external/libgav1/libgav1/src/dsp/arm/
distance_weighted_blend_neon.cc
41 const int16x4_t weights[2]) { in ComputeWeightedAverage8()
43 const int32x4_t wpred0_lo = vmull_s16(weights[0], vget_low_s16(pred0)); in ComputeWeightedAverage8()
44 const int32x4_t wpred0_hi = vmull_s16(weights[0], vget_high_s16(pred0)); in ComputeWeightedAverage8()
46 vmlal_s16(wpred0_lo, weights[1], vget_low_s16(pred1)); in ComputeWeightedAverage8()
48 vmlal_s16(wpred0_hi, weights[1], vget_high_s16(pred1)); in ComputeWeightedAverage8()
57 const int16x4_t weights[2], in DistanceWeightedBlendSmall_NEON()
68 const int16x8_t res0 = ComputeWeightedAverage8(src_00, src_10, weights); in DistanceWeightedBlendSmall_NEON()
74 const int16x8_t res1 = ComputeWeightedAverage8(src_01, src_11, weights); in DistanceWeightedBlendSmall_NEON()
99 const int16x4_t weights[2], in DistanceWeightedBlendLarge_NEON()
112 ComputeWeightedAverage8(src0_lo, src1_lo, weights); in DistanceWeightedBlendLarge_NEON()
[all …]
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/
transpose_conv_test.cc
46 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
47 weights.shape = OHWI(2, 2, 1, 1); in TEST()
48 weights.id = 2; in TEST()
49 weights.data = {1, 2, 3, 4}; in TEST()
50 attr.weights = std::move(weights); in TEST()
85 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
86 weights.shape = OHWI(1, 2, 2, 1); in TEST()
87 weights.id = 2; in TEST()
88 weights.data = {1, 2, 3, 4}; in TEST()
89 attr.weights = std::move(weights); in TEST()
[all …]
conv_test.cc
46 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
47 weights.shape = OHWI(2, 2, 1, 1); in TEST()
48 weights.id = 2; in TEST()
49 weights.data = {1, 2, 3, 4}; in TEST()
50 attr.weights = std::move(weights); in TEST()
84 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
85 weights.shape = OHWI(1, 2, 2, 1); in TEST()
86 weights.id = 2; in TEST()
87 weights.data = {1, 2, 3, 4}; in TEST()
88 attr.weights = std::move(weights); in TEST()
[all …]
/external/rnnoise/training/
dump_rnn.py
37 weights = layer.get_weights()
39 if len(weights) > 2:
40 ft.write('{} {} '.format(weights[0].shape[0], weights[0].shape[1]/3))
42 ft.write('{} {} '.format(weights[0].shape[0], weights[0].shape[1]))
49 printVector(f, ft, weights[0], layer.name + '_weights')
50 if len(weights) > 2:
51 printVector(f, ft, weights[1], layer.name + '_recurrent_weights')
52 printVector(f, ft, weights[-1], layer.name + '_bias')
54 if len(weights) > 2:
56 … .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]/3, activation))
[all …]
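
dump_rnn.py walks each Keras layer's get_weights() list and treats a layer with more than two weight arrays as recurrent (kernel, recurrent kernel, bias), dividing the second kernel dimension by three because the GRU gates are packed side by side. A small sketch of what those lists look like (layer sizes here are arbitrary, and modern TF/Keras is assumed rather than the Keras version rnnoise targets):

    import tensorflow as tf

    dense = tf.keras.layers.Dense(8)
    gru = tf.keras.layers.GRU(8)

    # Build the layers by calling them on dummy inputs.
    dense(tf.zeros((1, 4)))
    gru(tf.zeros((1, 5, 4)))

    print([w.shape for w in dense.get_weights()])  # [(4, 8), (8,)]  kernel, bias
    print([w.shape for w in gru.get_weights()])    # kernel, recurrent kernel, bias;
                                                   # last dim is 3 * units
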
/external/XNNPACK/src/f32-dwconv2d-chw/gen/
5x5s2p2-minmax-scalar-1x1-acc4.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4() argument
35 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
36 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
37 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
38 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
39 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
40 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
41 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
42 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
43 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc4()
[all …]
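
Each of these scalar 5x5 depthwise micro-kernels reads its packed parameters as one flat array: weights[0] is the bias and weights[1..25] are the 25 kernel taps in row-major order (vk00 through vk44). A sketch of packing a bias and a 5x5 kernel into that layout (this reflects only the indexing convention visible in the snippet, not XNNPACK's own packing code):

    import numpy as np

    bias = np.float32(0.1)
    kernel = np.arange(25, dtype=np.float32).reshape(5, 5)  # vk00..vk44, row-major

    packed = np.concatenate([[bias], kernel.ravel()])
    assert packed[0] == bias          # vbias = weights[0]
    assert packed[1] == kernel[0, 0]  # vk00  = weights[1]
    assert packed[6] == kernel[1, 0]  # vk10  = weights[6]
    print(packed.shape)               # (26,)

The remaining variants below (acc2/acc3/acc4, strided and padded) read the same 26-element layout and differ only in how many accumulators they use.
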
5x5s2p2-minmax-scalar-1x1.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1() argument
35 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
36 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
37 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
38 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
39 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
40 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
41 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
42 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
43 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1()
[all …]
5x5p2-minmax-scalar-1x1-acc4.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4() argument
34 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
35 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
36 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
37 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
38 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
39 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
40 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
41 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
42 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc4()
[all …]
5x5s2p2-minmax-scalar-1x1-acc2.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2() argument
35 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
36 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
37 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
38 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
39 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
40 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
41 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
42 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
43 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc2()
[all …]
5x5p2-minmax-scalar-1x1-acc3.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3() argument
34 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
35 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
36 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
37 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
38 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
39 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
40 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
41 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
42 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc3()
[all …]
5x5p2-minmax-scalar-1x1.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1() argument
34 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
35 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
36 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
37 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
38 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
39 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
40 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
41 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
42 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1()
[all …]
5x5s2p2-minmax-scalar-1x1-acc3.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3() argument
35 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
36 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
37 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
38 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
39 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
40 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
41 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
42 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
43 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__scalar_1x1_acc3()
[all …]
5x5p2-minmax-scalar-1x1-acc2.c
20 const float* weights, in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2() argument
34 const float vbias = weights[0]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
35 const float vk00 = weights[1]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
36 const float vk01 = weights[2]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
37 const float vk02 = weights[3]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
38 const float vk03 = weights[4]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
39 const float vk04 = weights[5]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
40 const float vk10 = weights[6]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
41 const float vk11 = weights[7]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
42 const float vk12 = weights[8]; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__scalar_1x1_acc2()
[all …]
