
Searched refs:lhs_params (Results 1 – 18 of 18) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm_custom_gemv.h
80 const MatrixParams<LhsScalar>& lhs_params, in IsSupportedGivenSufficientlyManyRows()
89 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Run()
102 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in CustomGemvTask() argument
107 : lhs_params_(lhs_params), in CustomGemvTask()
148 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in CustomGemv() argument
156 if (lhs_params.rows < Impl::kKernelRows) { in CustomGemv()
159 if (!Impl::IsSupportedGivenSufficientlyManyRows(lhs_params, rhs_params, in CustomGemv()
163 TFLITE_DCHECK_GE(lhs_params.rows, Impl::kKernelRows); in CustomGemv()
166 lhs_params.cols); in CustomGemv()
168 Impl::Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data, in CustomGemv()
[all …]
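
The snippet above shows the dispatch guard in CustomGemv(): the fast GEMV path is only taken when the LHS has at least one full kernel tile of rows and the implementation reports support. A minimal standalone sketch of that pattern; MatrixParams and the Impl interface here are simplified stand-ins, not the actual TFLite types:

    template <typename Scalar>
    struct MatrixParams {  // simplified stand-in for cpu_backend_gemm::MatrixParams
      int rows = 0;
      int cols = 0;
    };

    // Returns true if the specialized GEMV kernel handled the multiplication,
    // false if the caller must fall back to the general GEMM path.
    template <typename Impl, typename LhsScalar, typename RhsScalar>
    bool CustomGemvSketch(const MatrixParams<LhsScalar>& lhs_params,
                          const LhsScalar* lhs_data,
                          const MatrixParams<RhsScalar>& rhs_params,
                          const RhsScalar* rhs_data) {
      // The kernel processes Impl::kKernelRows rows per iteration, so an LHS
      // with fewer rows than one tile cannot use this path at all.
      if (lhs_params.rows < Impl::kKernelRows) {
        return false;
      }
      // Remaining support checks (types, layouts, quantization flavor).
      if (!Impl::IsSupportedGivenSufficientlyManyRows(lhs_params, rhs_params)) {
        return false;
      }
      Impl::Run(lhs_params, lhs_data, rhs_params, rhs_data);
      return true;
    }
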
cpu_backend_gemm.h
116 void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Gemm() argument
122 ValidateParams(lhs_params, rhs_params, dst_params, params); in Gemm()
123 if (!IsValidGemm(lhs_params, rhs_params, dst_params)) { in Gemm()
139 if (lhs_params.order != Order::kRowMajor || in Gemm()
151 quantization_flavor>::Run(lhs_params, lhs_data, in Gemm()
163 if (detail::CustomGemv(lhs_params, lhs_data, rhs_params, rhs_data, in Gemm()
170 quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data, in Gemm()
176 void Gemm(const MatrixParams<int8_t>& lhs_params, const int8_t* lhs_data, in Gemm() argument
182 ValidateParams(lhs_params, rhs_params, dst_params, params); in Gemm()
183 if (!IsValidGemm(lhs_params, rhs_params, dst_params)) { in Gemm()
[all …]
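
cpu_backend_gemm.h is the public entry point: Gemm() validates the three MatrixParams, rejects invalid shapes, and dispatches to ruy, gemmlowp, Eigen, or the custom GEMV path depending on types and layouts. A hedged sketch of a typical float call, with invented shapes and the row-major-LHS / col-major-RHS layout the fast path in the snippet expects:

    #include <vector>

    #include "tensorflow/lite/kernels/cpu_backend_context.h"
    #include "tensorflow/lite/kernels/cpu_backend_gemm.h"

    // Sketch: dst = lhs * rhs for float matrices; shapes are illustrative only.
    void ExampleFloatGemm() {
      using tflite::cpu_backend_gemm::Gemm;
      using tflite::cpu_backend_gemm::GemmParams;
      using tflite::cpu_backend_gemm::MatrixParams;
      using tflite::cpu_backend_gemm::Order;

      constexpr int kRows = 4, kDepth = 8, kCols = 2;
      std::vector<float> lhs(kRows * kDepth, 1.0f);
      std::vector<float> rhs(kDepth * kCols, 1.0f);
      std::vector<float> dst(kRows * kCols, 0.0f);

      MatrixParams<float> lhs_params;
      lhs_params.order = Order::kRowMajor;  // fast path expects row-major LHS
      lhs_params.rows = kRows;
      lhs_params.cols = kDepth;

      MatrixParams<float> rhs_params;
      rhs_params.order = Order::kColMajor;
      rhs_params.rows = kDepth;
      rhs_params.cols = kCols;

      MatrixParams<float> dst_params;
      dst_params.order = Order::kColMajor;
      dst_params.rows = kRows;
      dst_params.cols = kCols;

      GemmParams<float, float> gemm_params;  // no quantization for float
      tflite::CpuBackendContext context;
      Gemm(lhs_params, lhs.data(), rhs_params, rhs.data(), dst_params,
           dst.data(), gemm_params, &context);
    }
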
cpu_backend_gemm_gemmlowp.h
81 const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
89 gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
109 ColVectorMap bias_vector(params.bias, lhs_params.rows);
116 &gemmlowp_dst, -lhs_params.zero_point, -rhs_params.zero_point,
123 &gemmlowp_dst, -lhs_params.zero_point, -rhs_params.zero_point,
138 const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
149 gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
157 ColVectorMap bias_vector(params.bias, lhs_params.rows);
179 -lhs_params.zero_point, -rhs_params.zero_point, output_pipeline);
183 Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
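
The gemmlowp path wraps the raw buffers in gemmlowp::MatrixMap, builds an output pipeline (bias addition, fixed-point requantization, saturating cast), and passes the negated zero points as offsets, which is gemmlowp's sign convention and matches the `-lhs_params.zero_point` seen above. A hedged sketch against gemmlowp's public API; the quantization parameters are placeholders:

    #include <cstdint>
    #include <tuple>

    #include "public/gemmlowp.h"

    // Sketch: uint8 GEMM with per-row bias and fixed-point requantization.
    void GemmlowpSketch(const std::uint8_t* lhs_data, int rows, int depth,
                        const std::uint8_t* rhs_data, int cols,
                        const std::int32_t* bias_data, int lhs_zero_point,
                        int rhs_zero_point, int dst_zero_point,
                        std::int32_t multiplier_fixedpoint,
                        int multiplier_exponent, std::uint8_t* dst_data,
                        gemmlowp::GemmContext* context) {
      gemmlowp::MatrixMap<const std::uint8_t, gemmlowp::MapOrder::RowMajor>
          gemmlowp_lhs(lhs_data, rows, depth);
      gemmlowp::MatrixMap<const std::uint8_t, gemmlowp::MapOrder::ColMajor>
          gemmlowp_rhs(rhs_data, depth, cols);
      gemmlowp::MatrixMap<std::uint8_t, gemmlowp::MapOrder::ColMajor>
          gemmlowp_dst(dst_data, rows, cols);

      // Bias is added per output row, then the int32 accumulators are
      // scaled back down and saturated to uint8.
      using ColVectorMap =
          gemmlowp::VectorMap<const std::int32_t, gemmlowp::VectorShape::Col>;
      ColVectorMap bias_vector(bias_data, rows);
      gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_stage;
      bias_stage.bias_vector = bias_vector;
      gemmlowp::OutputStageQuantizeDownInt32ByFixedPoint quantize_stage;
      quantize_stage.result_fixedpoint_multiplier = multiplier_fixedpoint;
      quantize_stage.result_shift = -multiplier_exponent;
      quantize_stage.result_offset_after_shift = dst_zero_point;
      gemmlowp::OutputStageSaturatingCastToUint8 cast_stage;
      const auto output_pipeline =
          std::make_tuple(bias_stage, quantize_stage, cast_stage);

      gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::uint8_t,
                                       gemmlowp::DefaultL8R8BitDepthParams>(
          context, gemmlowp_lhs, gemmlowp_rhs, &gemmlowp_dst,
          -lhs_zero_point, -rhs_zero_point, output_pipeline);
    }
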
cpu_backend_gemm_test.cc
253 const MatrixParams<LhsScalar>& lhs_params, in PerformGemmThenCompareResultsThenAgainWithClamping() argument
261 const int accumulation_depth = lhs_params.cols; in PerformGemmThenCompareResultsThenAgainWithClamping()
262 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
275 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
284 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
311 int bisect_min, int bisect_max, const MatrixParams<LhsScalar>& lhs_params, in BisectReasonableMultiplierExponent() argument
331 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in BisectReasonableMultiplierExponent()
336 bisect_mid + 1, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data, in BisectReasonableMultiplierExponent()
340 bisect_min, bisect_mid, lhs_params, lhs_data, rhs_params, rhs_data, in BisectReasonableMultiplierExponent()
348 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in ReferenceGemm() argument
[all …]
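
The test exercises Gemm() against a naive reference implementation. A standalone sketch of what such a reference looks like for float, using the same layouts as the examples above; this is an illustrative stand-in, not the actual ReferenceGemm() from the test:

    #include <algorithm>

    // Naive reference: dst = clamp(lhs * rhs + bias), with row-major LHS
    // and column-major RHS/dst.
    void ReferenceGemmSketch(const float* lhs, int rows, int depth,
                             const float* rhs, int cols, const float* bias,
                             float clamp_min, float clamp_max, float* dst) {
      for (int c = 0; c < cols; ++c) {
        for (int r = 0; r < rows; ++r) {
          float accum = bias ? bias[r] : 0.0f;
          for (int d = 0; d < depth; ++d) {
            // lhs is row-major (rows x depth); rhs is col-major (depth x cols).
            accum += lhs[r * depth + d] * rhs[c * depth + d];
          }
          dst[c * rows + r] = std::min(std::max(accum, clamp_min), clamp_max);
        }
      }
    }
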
cpu_backend_gemm_params.h
232 const MatrixParams<LhsScalar>& lhs_params,
243 bool IsValidGemm(const MatrixParams<LhsScalar>& lhs_params,
247 valid &= lhs_params.rows >= 1;
248 valid &= lhs_params.cols >= 1;
253 valid &= lhs_params.cols == rhs_params.rows;
255 valid &= lhs_params.rows == dst_params.rows;
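
Only the lines matching lhs_params are shown; the rhs_params and dst_params conditions are elided by the search. A plausible, hedged reconstruction of the whole check: every dimension must be positive and the shapes must compose as (rows x depth) * (depth x cols) = (rows x cols).

    #include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

    // Hedged reconstruction of IsValidGemm(); only the lhs_params lines
    // above are verbatim, the rest is inferred from the shape algebra.
    template <typename LhsScalar, typename RhsScalar, typename DstScalar>
    bool IsValidGemmSketch(
        const tflite::cpu_backend_gemm::MatrixParams<LhsScalar>& lhs_params,
        const tflite::cpu_backend_gemm::MatrixParams<RhsScalar>& rhs_params,
        const tflite::cpu_backend_gemm::MatrixParams<DstScalar>& dst_params) {
      bool valid = true;
      valid &= lhs_params.rows >= 1;
      valid &= lhs_params.cols >= 1;
      valid &= rhs_params.rows >= 1;
      valid &= rhs_params.cols >= 1;
      valid &= dst_params.rows >= 1;
      valid &= dst_params.cols >= 1;
      valid &= lhs_params.cols == rhs_params.rows;  // shared accumulation depth
      valid &= rhs_params.cols == dst_params.cols;
      valid &= lhs_params.rows == dst_params.rows;
      return valid;
    }
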
cpu_backend_gemm_x86.h
39 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Run()
50 quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data, in Run()
57 quantization_flavor>::Run(lhs_params, lhs_data, in Run()
68 static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
74 GemmImplUsingEigen::Run(lhs_params, lhs_data, rhs_params, rhs_data,
cpu_backend_gemm_eigen.cc
38 const MatrixParams<float>& lhs_params, const float* lhs_data, in Run() argument
54 EigenMatrixMapRowMajorConst eigen_lhs(lhs_data, lhs_params.rows, in Run()
55 lhs_params.cols); in Run()
63 } else if (lhs_params.rows == 1) { in Run()
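
The Eigen path maps the raw buffers with Eigen::Map and special-cases the vector shapes visible in the snippet (a single RHS column or a single LHS row). A hedged sketch; the typedef names mirror those in the snippet but the branch details are simplified:

    #include "Eigen/Core"

    using EigenMatrixMapRowMajorConst = Eigen::Map<const Eigen::Matrix<
        float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>;
    using EigenMatrixMapColMajorConst = Eigen::Map<const Eigen::Matrix<
        float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>>;
    using EigenMatrixMapColMajorMutable = Eigen::Map<
        Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>>;

    // Sketch: wrap raw pointers and let Eigen evaluate the product, steering
    // the matrix*vector and vector*matrix cases onto cheaper expressions.
    void EigenGemmSketch(const float* lhs_data, int lhs_rows, int lhs_cols,
                         const float* rhs_data, int rhs_cols, float* dst_data) {
      EigenMatrixMapRowMajorConst eigen_lhs(lhs_data, lhs_rows, lhs_cols);
      EigenMatrixMapColMajorConst eigen_rhs(rhs_data, lhs_cols, rhs_cols);
      EigenMatrixMapColMajorMutable eigen_dst(dst_data, lhs_rows, rhs_cols);
      if (rhs_cols == 1) {
        eigen_dst.col(0).noalias() = eigen_lhs * eigen_rhs.col(0);
      } else if (lhs_rows == 1) {
        eigen_dst.row(0).noalias() = eigen_lhs.row(0) * eigen_rhs;
      } else {
        eigen_dst.noalias() = eigen_lhs * eigen_rhs;
      }
    }
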
cpu_backend_gemm_eigen.h
29 static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
cpu_backend_gemm_ruy.h
126 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
134 MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs, context->use_caching());
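
The ruy path translates each MatrixParams into a ruy::Matrix, which is what MakeRuyMatrix() does in the snippet, and then issues a single ruy::Mul. A hedged sketch against ruy's public API for the float case; the real MakeRuyMatrix() also forwards the cache policy (the use_caching flag above):

    #include "ruy/context.h"
    #include "ruy/matrix.h"
    #include "ruy/mul_params.h"
    #include "ruy/ruy.h"

    void RuyGemmSketch(const float* lhs_data, int rows, int depth,
                       const float* rhs_data, int cols, float* dst_data,
                       ruy::Context* context) {
      ruy::Matrix<float> ruy_lhs;
      ruy::MakeSimpleLayout(rows, depth, ruy::Order::kRowMajor,
                            ruy_lhs.mutable_layout());
      ruy_lhs.set_data(lhs_data);

      ruy::Matrix<float> ruy_rhs;
      ruy::MakeSimpleLayout(depth, cols, ruy::Order::kColMajor,
                            ruy_rhs.mutable_layout());
      ruy_rhs.set_data(rhs_data);

      ruy::Matrix<float> ruy_dst;
      ruy::MakeSimpleLayout(rows, cols, ruy::Order::kColMajor,
                            ruy_dst.mutable_layout());
      ruy_dst.set_data(dst_data);

      ruy::MulParams<float, float> mul_params;
      ruy::Mul(ruy_lhs, ruy_rhs, mul_params, context, &ruy_dst);
    }
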
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h
65 cpu_backend_gemm::MatrixParams<int8> lhs_params; in FullyConnectedPerChannel() local
66 lhs_params.rows = filter_rows; in FullyConnectedPerChannel()
67 lhs_params.cols = filter_cols; in FullyConnectedPerChannel()
68 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in FullyConnectedPerChannel()
69 lhs_params.zero_point = 0; in FullyConnectedPerChannel()
70 lhs_params.cache_policy = in FullyConnectedPerChannel()
95 cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, input_data, in FullyConnectedPerChannel()
137 cpu_backend_gemm::MatrixParams<int8> lhs_params; in FullyConnected() local
138 lhs_params.rows = filter_rows; in FullyConnected()
139 lhs_params.cols = filter_cols; in FullyConnected()
[all …]
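
In the fully-connected kernels the filter matrix is the GEMM LHS: one row per output channel, row-major, with the zero point pinned to 0 because the filter is symmetric-quantized. A hedged sketch of that setup (the DefaultCachePolicy call is an assumption based on similar call sites); the transpose-conv and conv results below follow the same pattern:

    #include <cstdint>

    #include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

    namespace cpu_backend_gemm = tflite::cpu_backend_gemm;

    // Sketch of the LHS setup seen in FullyConnected()/FullyConnectedPerChannel().
    cpu_backend_gemm::MatrixParams<int8_t> MakeFilterLhsParams(
        int filter_rows, int filter_cols, bool lhs_cacheable) {
      cpu_backend_gemm::MatrixParams<int8_t> lhs_params;
      lhs_params.rows = filter_rows;  // one row per output channel
      lhs_params.cols = filter_cols;  // accumulation depth
      lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
      lhs_params.zero_point = 0;      // filter is symmetric-quantized
      // Constant weights may be packed once and cached across invocations.
      lhs_params.cache_policy =
          cpu_backend_gemm::DefaultCachePolicy(lhs_cacheable);
      return lhs_params;
    }
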
transpose_conv.h
69 cpu_backend_gemm::MatrixParams<int8_t> lhs_params; in TransposeConvV2() local
70 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in TransposeConvV2()
71 lhs_params.rows = hwoi_ordered_filter_total_size; in TransposeConvV2()
72 lhs_params.cols = input_depth; in TransposeConvV2()
74 lhs_params.zero_point = 0; in TransposeConvV2()
91 cpu_backend_gemm::Gemm(lhs_params, hwoi_ordered_filter_data, rhs_params, in TransposeConvV2()
conv.h
98 cpu_backend_gemm::MatrixParams<int8> lhs_params; in ConvPerChannel() local
99 lhs_params.rows = filter_rows; in ConvPerChannel()
100 lhs_params.cols = filter_cols; in ConvPerChannel()
101 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in ConvPerChannel()
102 lhs_params.zero_point = 0; // filter is symmetric-quantized in ConvPerChannel()
122 cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, gemm_input_data, in ConvPerChannel()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
batch_matmul.h
80 MatrixParams<float> lhs_params; in BatchMatMul() local
81 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in BatchMatMul()
82 lhs_params.rows = lhs_rows; in BatchMatMul()
83 lhs_params.cols = accum_depth; in BatchMatMul()
108 cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2, in BatchMatMul()
190 MatrixParams<int8_t> lhs_params; in BatchMatMul() local
191 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in BatchMatMul()
192 lhs_params.rows = lhs_rows; in BatchMatMul()
193 lhs_params.cols = accum_depth; in BatchMatMul()
227 cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2, in BatchMatMul()
[all …]
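
BatchMatMul sets the MatrixParams up once and then issues one Gemm() per batch, advancing only the data pointers (lhs_ptr2/rhs_ptr2 in the snippet). A hedged sketch of the float case with dense, non-broadcast batching; the real kernel also handles broadcast batch dimensions, which this omits:

    #include "tensorflow/lite/kernels/cpu_backend_context.h"
    #include "tensorflow/lite/kernels/cpu_backend_gemm.h"

    void BatchMatMulSketch(int num_batches, int lhs_rows, int accum_depth,
                           int rhs_cols, const float* lhs_data,
                           const float* rhs_data, float* dst_data,
                           tflite::CpuBackendContext* context) {
      using tflite::cpu_backend_gemm::Gemm;
      using tflite::cpu_backend_gemm::GemmParams;
      using tflite::cpu_backend_gemm::MatrixParams;
      using tflite::cpu_backend_gemm::Order;

      MatrixParams<float> lhs_params;
      lhs_params.order = Order::kRowMajor;
      lhs_params.rows = lhs_rows;
      lhs_params.cols = accum_depth;

      MatrixParams<float> rhs_params;
      rhs_params.order = Order::kColMajor;
      rhs_params.rows = accum_depth;
      rhs_params.cols = rhs_cols;

      MatrixParams<float> dst_params;
      dst_params.order = Order::kColMajor;
      dst_params.rows = lhs_rows;
      dst_params.cols = rhs_cols;

      GemmParams<float, float> gemm_params;
      for (int b = 0; b < num_batches; ++b) {
        // Shared params; only the data pointers advance per batch.
        const float* lhs_ptr = lhs_data + b * lhs_rows * accum_depth;
        const float* rhs_ptr = rhs_data + b * accum_depth * rhs_cols;
        float* dst_ptr = dst_data + b * lhs_rows * rhs_cols;
        Gemm(lhs_params, lhs_ptr, rhs_params, rhs_ptr, dst_params, dst_ptr,
             gemm_params, context);
      }
    }
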
sse_tensor_utils.cc
386 MatrixParams<int8_t> lhs_params; in SseCpuBackendGemm() local
387 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in SseCpuBackendGemm()
388 lhs_params.rows = n_output; in SseCpuBackendGemm()
389 lhs_params.cols = n_input; in SseCpuBackendGemm()
390 lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup; in SseCpuBackendGemm()
406 cpu_backend_gemm::Gemm(lhs_params, input_to_gate_weights, rhs_params, input, in SseCpuBackendGemm()
optimized_ops.h
284 cpu_backend_gemm::MatrixParams<float> lhs_params; in FullyConnected() local
285 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in FullyConnected()
286 lhs_params.cols = weights_shape.Dims(dims_count - 1); in FullyConnected()
287 lhs_params.rows = FlatSizeSkipDim(weights_shape, dims_count - 1); in FullyConnected()
288 lhs_params.cache_policy = in FullyConnected()
299 cpu_backend_gemm::Gemm(lhs_params, weights_data, rhs_params, input_data, in FullyConnected()
337 cpu_backend_gemm::MatrixParams<uint8> lhs_params; in FullyConnected() local
338 lhs_params.rows = filter_rows; in FullyConnected()
339 lhs_params.cols = filter_cols; in FullyConnected()
340 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in FullyConnected()
[all …]
neon_tensor_utils.cc
1013 MatrixParams<int8_t> lhs_params; in NeonCpuBackendGemm() local
1014 lhs_params.order = cpu_backend_gemm::Order::kRowMajor; in NeonCpuBackendGemm()
1015 lhs_params.rows = n_output; in NeonCpuBackendGemm()
1016 lhs_params.cols = n_input; in NeonCpuBackendGemm()
1017 lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup; in NeonCpuBackendGemm()
1033 cpu_backend_gemm::Gemm(lhs_params, input_to_gate_weights, rhs_params, input, in NeonCpuBackendGemm()
/external/tensorflow/tensorflow/compiler/xla/client/lib/
comparators.cc
63 std::vector<XlaOp> lhs_params; in CreateScalarComparisonComputation() local
77 lhs_params.emplace_back(lhs_param); in CreateScalarComparisonComputation()
87 auto shape_or = b->GetShape(lhs_params[0]); in CreateScalarComparisonComputation()
101 generators[i].value()(lhs_params[i], rhs_params[i], {}), in CreateScalarComparisonComputation()
105 And(param_equal, EqTotalOrder(lhs_params[i], rhs_params[i])); in CreateScalarComparisonComputation()
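
In the XLA comparator builder, lhs_params collects the left-hand scalar parameters: parameter 2*i is the lhs of operand i and 2*i+1 its rhs, and per-operand comparisons are chained with And/EqTotalOrder into a lexicographic order. A hedged sketch of a two-key less-than comparator built the same way; the element type and names are invented:

    #include <vector>

    #include "absl/strings/str_cat.h"
    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch: lexicographic "less" over two s32 keys, with the interleaved
    // parameter numbering used by CreateScalarComparisonComputation().
    xla::XlaComputation BuildTwoKeyLessComparator() {
      xla::XlaBuilder b("two_key_less");
      const xla::Shape scalar = xla::ShapeUtil::MakeShape(xla::S32, {});
      std::vector<xla::XlaOp> lhs_params;
      std::vector<xla::XlaOp> rhs_params;
      for (int i = 0; i < 2; ++i) {
        lhs_params.push_back(
            xla::Parameter(&b, 2 * i, scalar, absl::StrCat("p.", i, ".lhs")));
        rhs_params.push_back(
            xla::Parameter(&b, 2 * i + 1, scalar, absl::StrCat("p.", i, ".rhs")));
      }
      // Key 0 decides unless it ties, in which case key 1 decides.
      xla::XlaOp result = xla::Lt(lhs_params[0], rhs_params[0]);
      xla::XlaOp tie = xla::Eq(lhs_params[0], rhs_params[0]);
      result = xla::Or(result,
                       xla::And(tie, xla::Lt(lhs_params[1], rhs_params[1])));
      return b.Build(result).value();
    }
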
approx_topk.cc
74 std::vector<XlaOp> lhs_params; in BuildReductionComputation() local
77 lhs_params.reserve(num_operands); in BuildReductionComputation()
81 lhs_params.push_back(Parameter(reduction_builder.get(), param_number, in BuildReductionComputation()
96 comparator_args.push_back(lhs_params[i]); in BuildReductionComputation()
103 results.push_back(Select(pred, lhs_params[i], rhs_params[i])); in BuildReductionComputation()
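
approx_topk builds its reduction body the same way: lhs_params and rhs_params hold the paired reducer parameters, the comparator is invoked once via Call with interleaved arguments, and each output selects between its lhs and rhs using the shared predicate. A hedged sketch; the scalar shapes and the real code's tie-breaker handling are simplified away:

    #include <vector>

    #include "absl/strings/str_cat.h"
    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch: variadic reduction body that keeps either the lhs or the rhs
    // element of every operand pair, decided by one comparator predicate.
    xla::XlaComputation BuildReductionSketch(
        const xla::XlaComputation& comparator, int num_operands) {
      xla::XlaBuilder b("reduction");
      const xla::Shape scalar = xla::ShapeUtil::MakeShape(xla::F32, {});
      std::vector<xla::XlaOp> lhs_params;
      std::vector<xla::XlaOp> rhs_params;
      lhs_params.reserve(num_operands);
      rhs_params.reserve(num_operands);
      // Assumed reducer signature: all lhs scalars first, then all rhs.
      for (int i = 0; i < num_operands; ++i) {
        lhs_params.push_back(
            xla::Parameter(&b, i, scalar, absl::StrCat("lhs.", i)));
      }
      for (int i = 0; i < num_operands; ++i) {
        rhs_params.push_back(
            xla::Parameter(&b, num_operands + i, scalar,
                           absl::StrCat("rhs.", i)));
      }
      // The comparator expects interleaved arguments: lhs_0, rhs_0, lhs_1, ...
      std::vector<xla::XlaOp> comparator_args;
      for (int i = 0; i < num_operands; ++i) {
        comparator_args.push_back(lhs_params[i]);
        comparator_args.push_back(rhs_params[i]);
      }
      xla::XlaOp pred = xla::Call(&b, comparator, comparator_args);
      std::vector<xla::XlaOp> results;
      results.reserve(num_operands);
      for (int i = 0; i < num_operands; ++i) {
        results.push_back(xla::Select(pred, lhs_params[i], rhs_params[i]));
      }
      xla::Tuple(&b, results);
      return b.Build().value();
    }
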