
Searched refs:dst_params (Results 1 – 19 of 19) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm.h:118 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data, in Gemm() argument
122 ValidateParams(lhs_params, rhs_params, dst_params, params); in Gemm()
134 dst_params.order != Order::kColMajor) { in Gemm()
146 dst_params, dst_data, in Gemm()
152 const bool try_custom_gemv = (dst_params.cols == 1); in Gemm()
157 dst_params, dst_data, params, context)) { in Gemm()
164 dst_params, dst_data, params, context); in Gemm()
173 const MatrixParams<int32_t>& dst_params, int32_t* dst_data, in Gemm() argument
177 ValidateParams(lhs_params, rhs_params, dst_params, params); in Gemm()
185 dst_params, dst_data, in Gemm()
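All of these cpu_backend_gemm.h call sites pass a MatrixParams<DstScalar> describing the destination matrix, plus the raw dst_data pointer, into cpu_backend_gemm::Gemm. As a minimal orientation sketch of that calling convention for the float case (the ExampleGemm wrapper, its dimension arguments, and the explicit clamp bounds are illustrative assumptions, not code from the files in these results):

#include <limits>

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

using tflite::cpu_backend_gemm::Gemm;
using tflite::cpu_backend_gemm::GemmParams;
using tflite::cpu_backend_gemm::MatrixParams;
using tflite::cpu_backend_gemm::Order;

// Illustrative wrapper: multiplies a (rows x depth) LHS by a (depth x cols)
// RHS into a (rows x cols) destination.
void ExampleGemm(const float* lhs_data, const float* rhs_data, float* dst_data,
                 int rows, int depth, int cols,
                 tflite::CpuBackendContext* context) {
  MatrixParams<float> lhs_params;
  lhs_params.order = Order::kRowMajor;
  lhs_params.rows = rows;
  lhs_params.cols = depth;

  MatrixParams<float> rhs_params;
  rhs_params.order = Order::kColMajor;
  rhs_params.rows = depth;
  rhs_params.cols = cols;

  // dst_params describes the destination buffer handed to Gemm().
  MatrixParams<float> dst_params;
  dst_params.order = Order::kColMajor;
  dst_params.rows = rows;
  dst_params.cols = cols;

  GemmParams<float, float> gemm_params;  // no bias in this sketch
  gemm_params.clamp_min = -std::numeric_limits<float>::infinity();
  gemm_params.clamp_max = std::numeric_limits<float>::infinity();

  Gemm(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
       gemm_params, context);
}

The column-major destination is not incidental: the order check at cpu_backend_gemm.h:134 above appears to route any other dst_params.order straight to the generic path, bypassing the custom-GEMV attempt at line 152.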
cpu_backend_gemm_eigen.cc:40 const MatrixParams<float>& dst_params, float* dst_data, in Run() argument
58 EigenMatrixMapColMajorMutable eigen_dst(dst_data, dst_params.rows, in Run()
59 dst_params.cols); in Run()
70 BiasAndClamp(params.clamp_min, params.clamp_max, dst_params.rows, in Run()
71 params.bias, dst_params.rows * dst_params.cols, dst_data); in Run()
cpu_backend_gemm_gemmlowp.h:83 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
93 dst_data, dst_params.rows, dst_params.cols);
98 scale_stage.result_offset_after_shift = dst_params.zero_point;
140 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
153 dst_data, dst_params.rows, dst_params.cols);
163 scale_stage.result_offset_after_shift = dst_params.zero_point;
165 ColVectorMap(params.multiplier_fixedpoint_perchannel, dst_params.rows);
167 ColVectorMap(params.multiplier_exponent_perchannel, dst_params.rows);
183 Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
cpu_backend_gemm_test.cc:257 const MatrixParams<DstScalar>& dst_params, std::vector<DstScalar>* dst_data, in PerformGemmThenCompareResultsThenAgainWithClamping() argument
262 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
275 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
284 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
315 const MatrixParams<DstScalar>& dst_params, std::vector<DstScalar>* dst_data, in BisectReasonableMultiplierExponent() argument
331 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in BisectReasonableMultiplierExponent()
337 dst_params, dst_data, params_copy, cpu_backend_context); in BisectReasonableMultiplierExponent()
341 dst_params, dst_data, params_copy, cpu_backend_context); in BisectReasonableMultiplierExponent()
350 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data, in ReferenceGemm() argument
358 cpu_backend_gemm::detail::MakeRuyMatrix(dst_params, dst_data, &ruy_dst); in ReferenceGemm()
[all …]
cpu_backend_gemm_x86.h:41 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data, in Run()
51 dst_params, dst_data, params, context); in Run()
59 dst_params, dst_data, in Run()
70 const MatrixParams<float>& dst_params, float* dst_data,
75 dst_params, dst_data, params, context);
cpu_backend_gemm_custom_gemv.h:82 const MatrixParams<DstScalar>& dst_params, in IsSupportedGivenSufficientlyManyRows()
91 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data, in Run()
104 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data, in CustomGemvTask() argument
111 dst_params_(dst_params), in CustomGemvTask()
150 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data, in CustomGemv() argument
160 dst_params, params)) { in CustomGemv()
165 context->max_num_threads(), dst_params.rows, dst_params.cols, in CustomGemv()
168 Impl::Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data, in CustomGemv()
176 RoundUp<Impl::kKernelRows>(CeilQuotient(dst_params.rows, thread_count)); in CustomGemv()
179 int row_end = std::min(dst_params.rows, row_start + kRowsPerThread); in CustomGemv()
[all …]
cpu_backend_gemm_eigen.h:31 const MatrixParams<float>& dst_params, float* dst_data,
cpu_backend_gemm_ruy.h:128 const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
136 MakeRuyMatrix(dst_params, dst_data, &ruy_dst);
cpu_backend_gemm_params.h:234 const MatrixParams<DstScalar>& dst_params,
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h:74 cpu_backend_gemm::MatrixParams<int8> dst_params; in FullyConnected() local
75 dst_params.rows = filter_rows; in FullyConnected()
76 dst_params.cols = batches; in FullyConnected()
77 dst_params.order = cpu_backend_gemm::Order::kColMajor; in FullyConnected()
78 dst_params.zero_point = output_offset; in FullyConnected()
86 dst_params, output_data, gemm_params, in FullyConnected()
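The quantized callers in this directory all populate dst_params the same way: output shape plus the output zero point, with the requantization scale carried separately in GemmParams. Below is a sketch of that pattern modeled on the fully_connected.h lines above; the wrapper function and its parameter names are illustrative, and the non-per-channel multiplier_fixedpoint / multiplier_exponent fields are assumed from cpu_backend_gemm_params.h rather than visible in these results.

#include <cstdint>

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

namespace gemm = tflite::cpu_backend_gemm;

// Illustrative wrapper for a per-tensor-quantized int8 fully-connected GEMM:
// filter is (filter_rows x filter_cols), input is (filter_cols x batches).
void ExampleQuantizedGemm(const int8_t* filter_data, const int8_t* input_data,
                          const int32_t* bias_data, int8_t* output_data,
                          int filter_rows, int filter_cols, int batches,
                          int32_t input_offset, int32_t output_offset,
                          int32_t output_multiplier, int output_shift,
                          int32_t output_activation_min,
                          int32_t output_activation_max,
                          tflite::CpuBackendContext* context) {
  // int8 filters are symmetrically quantized, so the LHS zero_point stays at
  // its default of 0.
  gemm::MatrixParams<int8_t> lhs_params;
  lhs_params.order = gemm::Order::kRowMajor;
  lhs_params.rows = filter_rows;
  lhs_params.cols = filter_cols;

  gemm::MatrixParams<int8_t> rhs_params;
  rhs_params.order = gemm::Order::kColMajor;
  rhs_params.rows = filter_cols;
  rhs_params.cols = batches;
  rhs_params.zero_point = -input_offset;

  // As in fully_connected.h:74-78 above: dst_params carries the output shape
  // and the output zero point.
  gemm::MatrixParams<int8_t> dst_params;
  dst_params.order = gemm::Order::kColMajor;
  dst_params.rows = filter_rows;
  dst_params.cols = batches;
  dst_params.zero_point = output_offset;

  gemm::GemmParams<int32_t, int8_t> gemm_params;
  gemm_params.bias = bias_data;                           // one int32 bias per output row
  gemm_params.multiplier_fixedpoint = output_multiplier;  // requantization scale
  gemm_params.multiplier_exponent = output_shift;
  gemm_params.clamp_min = static_cast<int8_t>(output_activation_min);
  gemm_params.clamp_max = static_cast<int8_t>(output_activation_max);

  gemm::Gemm(lhs_params, filter_data, rhs_params, input_data, dst_params,
             output_data, gemm_params, context);
}

The gemmlowp path above shows where that zero point ends up: cpu_backend_gemm_gemmlowp.h copies dst_params.zero_point into scale_stage.result_offset_after_shift (lines 98 and 163), so the output offset is applied during requantization.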
conv.h:107 cpu_backend_gemm::MatrixParams<int8> dst_params; in ConvPerChannel() local
108 dst_params.rows = output_rows; in ConvPerChannel()
109 dst_params.cols = output_cols; in ConvPerChannel()
110 dst_params.order = cpu_backend_gemm::Order::kColMajor; in ConvPerChannel()
111 dst_params.zero_point = output_offset; in ConvPerChannel()
122 dst_params, output_data, gemm_params, in ConvPerChannel()
transpose_conv.h:80 cpu_backend_gemm::MatrixParams<int32_t> dst_params; in TransposeConvV2() local
81 dst_params.order = cpu_backend_gemm::Order::kColMajor; in TransposeConvV2()
82 dst_params.rows = hwoi_ordered_filter_total_size; in TransposeConvV2()
83 dst_params.cols = input_image_size; in TransposeConvV2()
87 input_data + input_offset * i, dst_params, in TransposeConvV2()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
batch_matmul.h:90 MatrixParams<float> dst_params; in BatchMatMul() local
91 dst_params.order = cpu_backend_gemm::Order::kColMajor; in BatchMatMul()
92 dst_params.rows = lhs_rows; in BatchMatMul()
93 dst_params.cols = rhs_cols; in BatchMatMul()
109 dst_params, out_ptr, gemm_params, context); in BatchMatMul()
200 MatrixParams<int32_t> dst_params; in BatchMatMul() local
201 dst_params.order = cpu_backend_gemm::Order::kColMajor; in BatchMatMul()
202 dst_params.rows = lhs_rows; in BatchMatMul()
203 dst_params.cols = rhs_cols; in BatchMatMul()
228 dst_params, accum_scratch, gemm_params, context); in BatchMatMul()
[all …]
sse_tensor_utils.cc:195 MatrixParams<int32_t> dst_params; in SseCpuBackendGemm() local
196 dst_params.order = cpu_backend_gemm::Order::kColMajor; in SseCpuBackendGemm()
197 dst_params.rows = n_output; in SseCpuBackendGemm()
198 dst_params.cols = n_batch; in SseCpuBackendGemm()
205 dst_params, scratch, gemm_params, context); in SseCpuBackendGemm()
optimized_ops.h:354 cpu_backend_gemm::MatrixParams<float> dst_params; in FullyConnected() local
355 dst_params.order = cpu_backend_gemm::Order::kColMajor; in FullyConnected()
356 dst_params.rows = output_shape.Dims(output_shape.DimensionsCount() - 1); in FullyConnected()
357 dst_params.cols = in FullyConnected()
364 dst_params, output_data, gemm_params, in FullyConnected()
415 cpu_backend_gemm::MatrixParams<uint8> dst_params; in FullyConnected() local
416 dst_params.rows = filter_rows; in FullyConnected()
417 dst_params.cols = batches; in FullyConnected()
418 dst_params.order = cpu_backend_gemm::Order::kColMajor; in FullyConnected()
419 dst_params.zero_point = output_offset; in FullyConnected()
[all …]
neon_tensor_utils.cc:998 MatrixParams<int32_t> dst_params; in NeonCpuBackendGemm() local
999 dst_params.order = cpu_backend_gemm::Order::kColMajor; in NeonCpuBackendGemm()
1000 dst_params.rows = n_output; in NeonCpuBackendGemm()
1001 dst_params.cols = n_batch; in NeonCpuBackendGemm()
1008 dst_params, scratch, gemm_params, context); in NeonCpuBackendGemm()
/external/crosvm/devices/src/virtio/video/encoder/
mod.rs:74 dst_params: Params, field
126 let mut dst_params = Default::default(); in new() localVariable
132 .populate_dst_params(&mut dst_params, desired_format, DEFAULT_BUFFER_SIZE) in new()
136 let dest_format = dst_params.format.ok_or(VideoError::InvalidArgument)?; in new()
151 dst_params, in new()
188 dst_params: self.dst_params.clone(), in set_encode_session()
240 self.dst_params.plane_formats[0].plane_size = output_buffer_size; in require_input_buffers()
269 params: self.dst_params.clone(), in require_input_buffers()
650 if num_planes != stream.dst_params.plane_formats.len() { in resource_create()
762 if data_sizes.len() != stream.dst_params.plane_formats.len() { in resource_queue()
[all …]
encoder.rs:71 pub dst_params: Params, field
182 dst_params: &mut Params, in populate_dst_params()
196 dst_params.format = Some(format_desc.format.clone()); in populate_dst_params()
200 dst_params.plane_formats = vec![PlaneFormat { in populate_dst_params()
libvda_encoder.rs:180 if config.dst_params.format.is_none() { in start_session()