
Searched refs:weights (Results 1 – 25 of 1208) sorted by relevance


/external/tensorflow/tensorflow/python/kernel_tests/
weights_broadcast_test.py
41 def _test_valid(self, weights, values): argument
43 weights=weights, values=values)
47 weights=weights_placeholder, values=values_placeholder)
51 weights_placeholder: weights,
57 self._test_valid(weights=5, values=_test_values((3, 2, 4)))
62 weights=np.asarray((5,)).reshape((1, 1, 1)),
68 weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
74 weights=np.asarray((5, 11)).reshape((1, 2, 1)),
80 weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)),
86 weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)),
[all …]
losses_test.py
53 self._predictions, self._predictions, weights=None)
66 weights = 2.3
67 loss = losses.absolute_difference(self._labels, self._predictions, weights)
69 self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
72 weights = 2.3
74 constant_op.constant(weights))
76 self.assertAlmostEqual(5.5 * weights, self.evaluate(loss), 3)
79 weights = constant_op.constant((1.2, 0.0), shape=(2, 1))
80 loss = losses.absolute_difference(self._labels, self._predictions, weights)
85 weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
[all …]
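The assertions above check that a scalar weight simply scales the reduced loss. A minimal numpy sketch of that contract, using hypothetical labels/predictions chosen so the unweighted mean absolute difference is 5.5 (function and variable names here are illustrative, not the TF implementation):

import numpy as np

def absolute_difference(labels, predictions, weights=1.0):
  # Scalar weights scale the reduced loss; non-scalar weights would be
  # broadcast against |labels - predictions| before the reduction.
  return np.mean(np.abs(labels - predictions) * weights)

labels = np.array([1.0, 9.0, 2.0, -5.0, -2.0, 6.0])
predictions = np.array([4.0, 8.0, 12.0, 8.0, 1.0, 3.0])
print(absolute_difference(labels, predictions))       # 5.5
print(absolute_difference(labels, predictions, 2.3))  # 12.65 == 5.5 * 2.3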
/external/tensorflow/tensorflow/python/keras/saving/
hdf5_format.py
78 if len(model.weights) != len(model._undeduplicated_weights):
215 weights,
233 def convert_nested_bidirectional(weights): argument
245 num_weights_per_layer = len(weights) // 2
247 layer.forward_layer, weights[:num_weights_per_layer],
250 layer.backward_layer, weights[num_weights_per_layer:],
254 def convert_nested_time_distributed(weights): argument
267 layer.layer, weights, original_keras_version, original_backend)
269 def convert_nested_model(weights): argument
281 trainable_weights = weights[:len(layer.trainable_weights)]
[all …]
/external/tensorflow/tensorflow/python/ops/
metrics_impl.py
88 def _remove_squeezable_dimensions(predictions, labels, weights): argument
117 if weights is None:
120 weights = ops.convert_to_tensor(weights)
121 weights_shape = weights.get_shape()
124 return predictions, labels, weights
131 weights = array_ops.squeeze(weights, [-1])
133 weights = array_ops.expand_dims(weights, [-1])
136 weights_rank_tensor = array_ops.rank(weights)
142 lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)
147 maybe_squeeze_weights = lambda: weights
[all …]
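_remove_squeezable_dimensions reconciles the rank of weights with predictions/labels before metrics are computed. A hedged numpy sketch of just the weights-handling branch visible above (squeeze a trailing size-1 dimension, or add one, so broadcasting works); the TF version also squeezes labels/predictions and handles unknown ranks with control-flow ops:

import numpy as np

def align_weights(predictions, weights):
  # Illustrative stand-in for the rank-alignment idea, not the TF code.
  if weights is None:
    return None
  weights = np.asarray(weights)
  rank_diff = weights.ndim - np.asarray(predictions).ndim
  if rank_diff == 1 and weights.shape[-1] == 1:
    return np.squeeze(weights, axis=-1)
  if rank_diff == -1:
    return np.expand_dims(weights, axis=-1)
  return weights

print(align_weights(np.zeros((2, 3)), np.ones((2, 3, 1))).shape)  # (2, 3)
print(align_weights(np.zeros((2, 3)), np.ones(2)).shape)          # (2, 1)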
weights_broadcast_ops.py
63 def assert_broadcastable(weights, values): argument
81 with ops.name_scope(None, "assert_broadcastable", (weights, values)) as scope:
82 with ops.name_scope(None, "weights", (weights,)) as weights_scope:
83 weights = ops.convert_to_tensor(weights, name=weights_scope)
84 weights_shape = array_ops.shape(weights, name="shape")
85 weights_rank = array_ops.rank(weights, name="rank")
103 weights_rank_static, values.shape, weights.shape))
123 "weights.shape=", weights.name, weights_shape,
136 def broadcast_weights(weights, values): argument
154 with ops.name_scope(None, "broadcast_weights", (weights, values)) as scope:
[all …]
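broadcast_weights asserts that weights are broadcastable to the shape of values and then materializes the broadcast. A minimal numpy illustration of that contract, reusing the shapes exercised in weights_broadcast_test.py above:

import numpy as np

values = np.zeros((3, 2, 4))
for weights in (5.0,
                np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)),
                np.asarray((5, 11)).reshape((1, 2, 1)),
                np.asarray((5, 7, 11)).reshape((3, 1, 1))):
  # Each weights dimension must be 1 or match the corresponding values dim.
  print(np.broadcast_to(weights, values.shape).shape)  # (3, 2, 4) every time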
/external/tensorflow/tensorflow/python/ops/losses/
losses_impl.py
90 def _num_present(losses, weights, per_batch=False): argument
112 if ((isinstance(weights, float) and weights != 0.0) or
113 (context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access
114 and not math_ops.equal(weights, 0.0))):
116 with ops.name_scope(None, "num_present", (losses, weights)) as scope:
117 weights = math_ops.cast(weights, dtype=dtypes.float32)
119 math_ops.equal(weights, 0.0),
120 array_ops.zeros_like(weights),
121 array_ops.ones_like(weights))
140 losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, argument
[all …]
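_num_present computes the denominator for weighted-mean losses: only positions whose broadcast weight is nonzero count as "present", so zero-weighted examples do not dilute the mean. A small numpy sketch of that idea (illustrative only):

import numpy as np

def num_present(losses, weights):
  broadcast = np.broadcast_to(np.asarray(weights, dtype=np.float32), losses.shape)
  return np.count_nonzero(broadcast)

losses = np.ones((2, 3))
print(num_present(losses, 1.0))                        # 6
print(num_present(losses, np.array([[1.2], [0.0]])))   # 3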
/external/tensorflow/tensorflow/lite/delegates/gpu/common/transformations/
fuse_mul_to_conv.cc
161 for (int d = 0; d < attr->weights.shape.o; ++d) { in FuseConvolution2DWithMultiply()
163 for (int s = 0; s < attr->weights.shape.i; ++s) { in FuseConvolution2DWithMultiply()
164 for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { in FuseConvolution2DWithMultiply()
165 for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { in FuseConvolution2DWithMultiply()
166 const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); in FuseConvolution2DWithMultiply()
167 attr->weights.data[index] *= multiplier; in FuseConvolution2DWithMultiply()
182 for (int g = 0; g < attr->weights.shape.o; ++g) { in FuseDepthwiseConvolution2DWithMultiply()
183 for (int s = 0; s < attr->weights.shape.i; ++s) { in FuseDepthwiseConvolution2DWithMultiply()
184 const int d = s * attr->weights.shape.o + g; in FuseDepthwiseConvolution2DWithMultiply()
186 for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { in FuseDepthwiseConvolution2DWithMultiply()
[all …]
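The loops above fold an elementwise multiply that follows a convolution into the convolution itself: every OHWI filter d (and its bias) is scaled by multiplier[d]. A numpy sketch of why that is equivalent; variable names are illustrative, not the GPU delegate's types:

import numpy as np

def fuse_mul_into_conv(weights_ohwi, bias, multiplier):
  # Scale output channel d of the weights and bias by multiplier[d].
  return weights_ohwi * multiplier[:, None, None, None], bias * multiplier

w = np.random.rand(16, 3, 2, 8)      # OHWI: out, kernel_h, kernel_w, in
b, m = np.random.rand(16), np.random.rand(16)
patch = np.random.rand(3, 2, 8)      # one receptive field
fw, fb = fuse_mul_into_conv(w, b, m)
d = 5
unfused = ((w[d] * patch).sum() + b[d]) * m[d]   # conv, then multiply
fused = (fw[d] * patch).sum() + fb[d]            # multiply folded into conv
print(np.allclose(unfused, fused))               # True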
fuse_mul_to_conv_test.cc
41 conv_attr.weights.shape = OHWI(16, 3, 2, 8); in TEST()
42 conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); in TEST()
98 conv_attr.weights.shape = OHWI(16, 3, 2, 8); in TEST()
99 conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); in TEST()
135 attr.weights.shape = OHWI(2, 1, 2, 2); in TEST()
136 attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; in TEST()
148 EXPECT_THAT(attr.weights.data, in TEST()
156 attr.weights.shape = OHWI(2, 1, 2, 2); in TEST()
157 attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; in TEST()
169 EXPECT_THAT(attr.weights.data, in TEST()
[all …]
fuse_add_to_conv.cc
155 FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias); in FuseConvolution2DWithAdd()
161 add_attr, attr->weights.shape.o * attr->weights.shape.i, &attr->bias); in FuseDepthwiseConvolution2DWithAdd()
166 FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias); in FuseConvolutionTransposedWithAdd()
171 FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias); in FuseFullyConnectedWithAdd()
180 Linear(attr->weights.shape.o)); in FuseAddWithConvolution2D()
182 for (int d = 0; d < attr->weights.shape.o; ++d) { in FuseAddWithConvolution2D()
183 for (int s = 0; s < attr->weights.shape.i; ++s) { in FuseAddWithConvolution2D()
185 for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { in FuseAddWithConvolution2D()
186 for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { in FuseAddWithConvolution2D()
187 const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); in FuseAddWithConvolution2D()
[all …]
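Two directions of fusion appear here: an add that follows the conv folds directly into the bias, while an add that precedes the conv contributes the weighted sum of the per-input-channel offsets to each output bias, as the nested loops show. A hedged numpy sketch of both, ignoring padding/boundary effects:

import numpy as np

def fuse_add_after_conv(bias, add_per_output_channel):
  return bias + add_per_output_channel

def fuse_add_before_conv(weights_ohwi, bias, add_per_input_channel):
  # bias[d] += sum over ky, kx, s of weights[d, ky, kx, s] * add[s]
  return bias + np.einsum('dhws,s->d', weights_ohwi, add_per_input_channel)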
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
fully_connected_texture.h
57 Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
61 void RearrangeWeightsFP16(const ::tflite::gpu::Tensor<OHWI, T>& weights,
64 void RearrangeWeightsFP32(const ::tflite::gpu::Tensor<OHWI, T>& weights,
75 const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) { in UploadWeights() argument
76 const int src_depth = AlignByN(IntegralDivideRoundUp(weights.shape.i, 4), 4); in UploadWeights()
77 const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4); in UploadWeights()
81 RearrangeWeightsFP32(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
86 RearrangeWeightsFP16(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
94 const ::tflite::gpu::Tensor<OHWI, T>& weights, absl::Span<half4> dst) { in RearrangeWeightsFP16() argument
95 const int src_depth = AlignByN(IntegralDivideRoundUp(weights.shape.i, 4), 4); in RearrangeWeightsFP16()
[all …]
depth_wise_conv_3d.h
61 Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
65 void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWDI, S>& weights,
89 const ::tflite::gpu::Tensor<OHWDI, T>& weights, CLContext* context) { in UploadWeights() argument
90 const int dst_channels = weights.shape.i * weights.shape.o; in UploadWeights()
92 const int kernel_x = weights.shape.w; in UploadWeights()
93 const int kernel_y = weights.shape.h; in UploadWeights()
94 const int kernel_z = weights.shape.d; in UploadWeights()
103 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
115 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
131 const ::tflite::gpu::Tensor<OHWDI, S>& weights, absl::Span<T> dst) { in RearrangeWeightsData() argument
[all …]
conv_constants.h
57 kernel_size_(attr.weights.shape.w, attr.weights.shape.h), in ConvConstants()
61 src_channels_(attr.weights.shape.i), in ConvConstants()
62 dst_channels_(attr.weights.shape.o) {} in ConvConstants()
65 Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
69 void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
91 const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) { in UploadWeights() argument
92 const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4); in UploadWeights()
93 const int kernel_x = weights.shape.w; in UploadWeights()
94 const int kernel_y = weights.shape.h; in UploadWeights()
102 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
[all …]
depth_wise_conv.h
60 Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
64 void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
88 const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) { in UploadWeights() argument
89 const int dst_channels = weights.shape.i * weights.shape.o; in UploadWeights()
91 const int kernel_x = weights.shape.w; in UploadWeights()
92 const int kernel_y = weights.shape.h; in UploadWeights()
104 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
116 RearrangeWeightsData(weights, absl::MakeSpan(gpu_data)); in UploadWeights()
139 const ::tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { in RearrangeWeightsData() argument
140 const int dst_channels = weights.shape.i * weights.shape.o; in RearrangeWeightsData()
[all …]
/external/tensorflow/tensorflow/lite/delegates/gpu/gl/kernels/
conv_test.cc
46 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
47 weights.shape = OHWI(2, 2, 1, 1); in TEST()
48 weights.id = 2; in TEST()
49 weights.data = {1, 2, 3, 4}; in TEST()
50 attr.weights = std::move(weights); in TEST()
84 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
85 weights.shape = OHWI(1, 2, 2, 1); in TEST()
86 weights.id = 2; in TEST()
87 weights.data = {1, 2, 3, 4}; in TEST()
88 attr.weights = std::move(weights); in TEST()
[all …]
transpose_conv_test.cc
46 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
47 weights.shape = OHWI(2, 2, 1, 1); in TEST()
48 weights.id = 2; in TEST()
49 weights.data = {1, 2, 3, 4}; in TEST()
50 attr.weights = std::move(weights); in TEST()
85 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
86 weights.shape = OHWI(1, 2, 2, 1); in TEST()
87 weights.id = 2; in TEST()
88 weights.data = {1, 2, 3, 4}; in TEST()
89 attr.weights = std::move(weights); in TEST()
[all …]
depthwise_conv_test.cc
46 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
47 weights.shape = OHWI(2, 1, 1, 2); in TEST()
48 weights.id = 2; in TEST()
49 weights.data = {1, 3, 2, 4}; in TEST()
51 attr.weights = std::move(weights); in TEST()
84 Tensor<OHWI, DataType::FLOAT32> weights; in TEST() local
85 weights.shape = OHWI(2, 1, 1, 1); in TEST()
86 weights.id = 1; in TEST()
87 weights.data = {1, 3}; in TEST()
89 attr.weights = std::move(weights); in TEST()
[all …]
depthwise_conv.cc
44 auto weights = attr.weights.shape; in GenerateCode() local
45 const int offsets_count = weights.h * weights.w; in GenerateCode()
56 {"kernel_w", weights.w}, in GenerateCode()
57 {"kernel_h", weights.h}, in GenerateCode()
58 {"src_depth", IntegralDivideRoundUp(weights.i, 4)}, in GenerateCode()
59 {"channel_multiplier", weights.o}, in GenerateCode()
64 for (int h = 0; h < weights.h; ++h) { in GenerateCode()
65 for (int w = 0; w < weights.w; ++w) { in GenerateCode()
75 {"src_depth", IntegralDivideRoundUp(weights.i, 4)}, in GenerateCode()
76 {"channel_multiplier", weights.o}, in GenerateCode()
[all …]
/external/XNNPACK/src/f32-dwconv-spchw/
5x5s2p2-scalar.c
16 const float* weights, in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar() argument
43 const float vw0 = weights[0]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
44 const float vw1 = weights[1]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
45 const float vw2 = weights[2]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
46 const float vw3 = weights[3]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
47 const float vw4 = weights[4]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
48 const float vw5 = weights[5]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
49 const float vw6 = weights[6]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
50 const float vw7 = weights[7]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
51 const float vw8 = weights[8]; in xnn_f32_dwconv_spchw_ukernel_5x5s2p2__scalar()
[all …]
5x5p2-scalar.c
16 const float* weights, in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar() argument
43 const float vw0 = weights[0]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
44 const float vw1 = weights[1]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
45 const float vw2 = weights[2]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
46 const float vw3 = weights[3]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
47 const float vw4 = weights[4]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
48 const float vw5 = weights[5]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
49 const float vw6 = weights[6]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
50 const float vw7 = weights[7]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
51 const float vw8 = weights[8]; in xnn_f32_dwconv_spchw_ukernel_5x5p2__scalar()
[all …]
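Both micro-kernels above evaluate a single-channel 5x5 convolution with padding 2, differing only in stride (1 vs. 2); the truncated loads pull the filter taps out of the packed weights array. A plain Python reference of the arithmetic, under the assumption that weights packs a bias followed by the 25 taps (the exact XNNPACK packing is not reproduced here):

import numpy as np

def dwconv_5x5_p2_ref(src, weights, stride=1):
  bias, taps = weights[0], np.asarray(weights[1:26]).reshape(5, 5)
  padded = np.pad(src, 2)
  out_h = (src.shape[0] - 1) // stride + 1
  out_w = (src.shape[1] - 1) // stride + 1
  out = np.empty((out_h, out_w))
  for y in range(out_h):
    for x in range(out_w):
      window = padded[y * stride:y * stride + 5, x * stride:x * stride + 5]
      out[y, x] = bias + (window * taps).sum()
  return out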
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.metrics.pbtxt
5 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
9 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'num_thresholds\', \'metrics_collection…
13 …argspec: "args=[\'labels\', \'predictions\', \'k\', \'weights\', \'metrics_collections\', \'update…
17 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
21 …argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\',…
25 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
29 …argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\',…
33 …argspec: "args=[\'values\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\…
37 …argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_colle…
41 …argspec: "args=[\'labels\', \'predictions\', \'dim\', \'weights\', \'metrics_collections\', \'upda…
[all …]
/external/libgav1/libgav1/src/dsp/arm/
distance_weighted_blend_neon.cc
39 const int16x4_t weights[2]) { in ComputeWeightedAverage8()
41 const int32x4_t wpred0_lo = vmull_s16(weights[0], vget_low_s16(pred0)); in ComputeWeightedAverage8()
42 const int32x4_t wpred0_hi = vmull_s16(weights[0], vget_high_s16(pred0)); in ComputeWeightedAverage8()
44 vmlal_s16(wpred0_lo, weights[1], vget_low_s16(pred1)); in ComputeWeightedAverage8()
46 vmlal_s16(wpred0_hi, weights[1], vget_high_s16(pred1)); in ComputeWeightedAverage8()
55 const int16x4_t weights[2], in DistanceWeightedBlendSmall_NEON()
66 const int16x8_t res0 = ComputeWeightedAverage8(src_00, src_10, weights); in DistanceWeightedBlendSmall_NEON()
72 const int16x8_t res1 = ComputeWeightedAverage8(src_01, src_11, weights); in DistanceWeightedBlendSmall_NEON()
97 const int16x4_t weights[2], in DistanceWeightedBlendLarge_NEON()
110 ComputeWeightedAverage8(src0_lo, src1_lo, weights); in DistanceWeightedBlendLarge_NEON()
[all …]
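ComputeWeightedAverage8 blends two prediction blocks with fixed-point distance weights. A generic Python sketch of the idea, assuming the two weights sum to 16 (shift of 4); the real kernel's rounding constants and compound-prediction offsets follow the AV1 spec and are omitted here:

import numpy as np

def distance_weighted_blend(pred0, pred1, w0, w1, shift=4):
  acc = w0 * pred0.astype(np.int32) + w1 * pred1.astype(np.int32)
  return (acc + (1 << (shift - 1))) >> shift   # round to nearest, then scale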
/external/freetype/src/base/
ftlcdfil.c
80 FT_LcdFiveTapFilter weights ) in ft_lcd_filter_fir() argument
109 fir[2] = weights[2] * val; in ft_lcd_filter_fir()
110 fir[3] = weights[3] * val; in ft_lcd_filter_fir()
111 fir[4] = weights[4] * val; in ft_lcd_filter_fir()
114 fir[1] = fir[2] + weights[1] * val; in ft_lcd_filter_fir()
115 fir[2] = fir[3] + weights[2] * val; in ft_lcd_filter_fir()
116 fir[3] = fir[4] + weights[3] * val; in ft_lcd_filter_fir()
117 fir[4] = weights[4] * val; in ft_lcd_filter_fir()
122 fir[0] = fir[1] + weights[0] * val; in ft_lcd_filter_fir()
123 fir[1] = fir[2] + weights[1] * val; in ft_lcd_filter_fir()
[all …]
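ft_lcd_filter_fir runs a 5-tap FIR filter along each row using the rolling fir[] accumulator visible above. A simplified Python equivalent of the per-row arithmetic, assuming taps that sum to 256 (e.g. FreeType's default 0x08, 0x4D, 0x56, 0x4D, 0x08); edge alignment in the real code differs slightly:

import numpy as np

def lcd_fir_row(row, weights):
  padded = np.pad(np.asarray(row, dtype=np.uint32), 2)
  out = np.empty(len(row), dtype=np.uint32)
  for x in range(len(row)):
    out[x] = (padded[x:x + 5] * np.asarray(weights)).sum() >> 8  # taps sum to 256
  return np.minimum(out, 255).astype(np.uint8)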
/external/tensorflow/tensorflow/python/keras/applications/
efficientnet.py
154 weights='imagenet', argument
211 if not (weights in {'imagenet', None} or os.path.exists(weights)):
217 if weights == 'imagenet' and include_top and classes != 1000:
228 weights=weights)
332 if weights == 'imagenet':
346 elif weights is not None:
347 model.load_weights(weights)
458 weights='imagenet', argument
471 weights=weights,
482 weights='imagenet', argument
[all …]
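The validation above accepts weights equal to 'imagenet', None, or a path to a weights file, and rejects classes != 1000 when ImageNet weights are requested with the top classifier. A sketch of typical use through the public Keras application entry points:

import tensorflow as tf

pretrained = tf.keras.applications.EfficientNetB0(weights='imagenet')
from_scratch = tf.keras.applications.EfficientNetB0(weights=None, classes=10)
# A local weights file path is also accepted, e.g.:
# custom = tf.keras.applications.EfficientNetB0(weights='/path/to/weights.h5')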
/external/tensorflow/tensorflow/lite/kernels/
transpose_conv.cc
174 const TfLiteTensor* weights, in ResizeCol2ImTensor() argument
185 const RuntimeShape& weights_shape = GetTensorShape(weights); in ResizeCol2ImTensor()
196 const TfLiteTensor* weights, in ResizeAndTransposeWeights() argument
199 const RuntimeShape& input_shape = GetTensorShape(weights); in ResizeAndTransposeWeights()
205 transposed_weights->type = weights->type; in ResizeAndTransposeWeights()
218 if (weights->type == kTfLiteFloat32) { in ResizeAndTransposeWeights()
220 GetTensorData<float>(weights), in ResizeAndTransposeWeights()
223 } else if (weights->type == kTfLiteUInt8) { in ResizeAndTransposeWeights()
225 GetTensorData<uint8>(weights), in ResizeAndTransposeWeights()
228 } else if (weights->type == kTfLiteInt8) { in ResizeAndTransposeWeights()
[all …]
/external/tensorflow/tensorflow/lite/delegates/gpu/metal/kernels/
transpose_conv.cc
125 const int kernel_x = attr.weights.shape.w; in GetDeconvolution()
126 const int kernel_y = attr.weights.shape.h; in GetDeconvolution()
133 const int src_depth = IntegralDivideRoundUp(attr.weights.shape.i, 4); in GetDeconvolution()
134 const int dst_depth = IntegralDivideRoundUp(attr.weights.shape.o, 4); in GetDeconvolution()
135 const int dst_channels_aligned = AlignByN(attr.weights.shape.o, 4); in GetDeconvolution()
137 src_depth, dst_depth, attr.weights.shape.o, in GetDeconvolution()
259 const int kernel_x = attr.weights.shape.w; in GetDeconvolutionShared()
260 const int kernel_y = attr.weights.shape.h; in GetDeconvolutionShared()
267 const int src_depth = IntegralDivideRoundUp(attr.weights.shape.i, 4); in GetDeconvolutionShared()
268 const int dst_depth = IntegralDivideRoundUp(attr.weights.shape.o, 4); in GetDeconvolutionShared()
[all …]
