Searched refs:lhs_data (Results 1 – 25 of 27) sorted by relevance

/external/ruy/ruy/
kernel_avx512.cc
291 const __m512i lhs_data = _mm512_loadu_si512(lhs_ptr);
313 _mm512_cvtepi8_epi16(_mm512_cvtepi32_epi16(lhs_data));
316 _mm512_cvtepi32_epi16(_mm512_srli_epi32(lhs_data, 16)));
730 const __m512i lhs_data = _mm512_loadu_si512(lhs_ptr);
745 _mm512_cvtepi8_epi16(_mm512_cvtepi32_epi16(lhs_data));
748 _mm512_cvtepi32_epi16(_mm512_srli_epi32(lhs_data, 16)));
974 const __m512 lhs_data = _mm512_loadu_ps(lhs_ptr);
992 _mm512_fmadd_ps(lhs_data, dup_rhs_element_j0, accum_data_v0);
995 _mm512_fmadd_ps(lhs_data, dup_rhs_element_j1, accum_data_v1);
998 _mm512_fmadd_ps(lhs_data, dup_rhs_element_j2, accum_data_v2);
[all …]
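
A minimal sketch of the load-and-widen pattern these kernel_avx512.cc matches show, assuming AVX-512F/BW; the function and pointer names are illustrative, not ruy's, and the surrounding accumulation loop is omitted:

#include <immintrin.h>
#include <cstdint>

// 64 int8 lhs values are loaded as one __m512i and split into two
// vectors of 32 sign-extended int16 lanes each.
void LoadAndWidenLhs(const std::int8_t* lhs_ptr, __m512i* lo16, __m512i* hi16) {
  const __m512i lhs_data = _mm512_loadu_si512(lhs_ptr);
  // Keep bytes {0,1} of every 32-bit lane, then sign-extend each byte.
  *lo16 = _mm512_cvtepi8_epi16(_mm512_cvtepi32_epi16(lhs_data));
  // Shift bytes {2,3} down into position first, then do the same.
  *hi16 = _mm512_cvtepi8_epi16(
      _mm512_cvtepi32_epi16(_mm512_srli_epi32(lhs_data, 16)));
}
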
kernel_x86.h
609 const __m256 lhs_data = _mm256_loadu_ps(lhs_ptr);
621 lhs_data, dup_rhs_element_0, accum_data_v[0]);
625 lhs_data, dup_rhs_element_1, accum_data_v[1]);
629 lhs_data, dup_rhs_element_2, accum_data_v[2]);
633 lhs_data, dup_rhs_element_3, accum_data_v[3]);
637 lhs_data, dup_rhs_element_4, accum_data_v[4]);
641 lhs_data, dup_rhs_element_5, accum_data_v[5]);
645 lhs_data, dup_rhs_element_6, accum_data_v[6]);
649 lhs_data, dup_rhs_element_7, accum_data_v[7]);
709 const __m256 lhs_data = _mm256_loadu_ps(lhs_ptr);
[all …]
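
The kernel_x86.h float path above follows a broadcast-and-FMA scheme: one 8-wide column of lhs multiplies a single rhs element per destination column. A condensed sketch, assuming FMA support and using illustrative names; the real kernel fully unrolls this loop, as the match list shows:

#include <immintrin.h>

void FmaColumn(const float* lhs_ptr, const float* rhs_ptr,
               __m256 accum_data_v[8]) {
  const __m256 lhs_data = _mm256_loadu_ps(lhs_ptr);
  for (int j = 0; j < 8; ++j) {
    // Duplicate rhs element j across all 8 lanes, then fused multiply-add.
    const __m256 dup_rhs_element_j = _mm256_set1_ps(rhs_ptr[j]);
    accum_data_v[j] =
        _mm256_fmadd_ps(lhs_data, dup_rhs_element_j, accum_data_v[j]);
  }
}
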
perchannel_buffers_reallocation_test.cc
55 const LhsScalar lhs_data[kMatrixSize * kMatrixSize] = {0}; in TestPerChannelBuffersReallocation() local
56 lhs.set_data(lhs_data); in TestPerChannelBuffersReallocation()
prepacked_cache_test.cc
235 const float lhs_data[] = {1, 2, 3, 4}; in TestCachePolicies() local
241 lhs.set_data(lhs_data); in TestCachePolicies()
276 const float lhs_data[] = {1, 2, 3, 4}; in TEST() local
282 lhs.set_data(lhs_data); in TEST()
kernel_avx2_fma.cc
256 const __m256i lhs_data =
279 _mm256_shuffle_epi8(lhs_data, splitter_idx);
812 const __m256i lhs_data =
828 _mm256_shuffle_epi8(lhs_data, splitter_idx);
kernel_avx.cc
603 const __m256i lhs_data =
628 intrin_utils::mm256_shuffle_epi8<path>(lhs_data, splitter_idx);
1253 const __m256i lhs_data =
1269 intrin_utils::mm256_shuffle_epi8<path>(lhs_data, splitter_idx);
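
Both kernel_avx2_fma.cc and kernel_avx.cc split the loaded int8 lhs with a byte shuffle. A sketch of that idea with a hypothetical splitter index (ruy's actual constant may differ); note that _mm256_shuffle_epi8 operates within each 128-bit lane:

#include <immintrin.h>

__m256i DeinterleaveBytes(__m256i lhs_data) {
  // Hypothetical index: per 128-bit lane, gather even bytes into the low
  // half and odd bytes into the high half.
  const __m256i splitter_idx = _mm256_setr_epi8(
      0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15,
      0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15);
  return _mm256_shuffle_epi8(lhs_data, splitter_idx);
}
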
/external/ruy/example/
example.cc
22 const float lhs_data[] = {1, 2, 3, 4}; in ExampleMulFloat() local
28 lhs.set_data(lhs_data); in ExampleMulFloat()
46 const float lhs_data[] = {1, 2, 3, 4}; in ExampleMulFloatWithBiasAddAndClamp() local
53 lhs.set_data(lhs_data); in ExampleMulFloatWithBiasAddAndClamp()
74 const std::uint8_t lhs_data[] = {124, 125, 126, 127}; in ExampleMulUint8AsymmetricQuantized() local
80 lhs.set_data(lhs_data); in ExampleMulUint8AsymmetricQuantized()
103 const std::int8_t lhs_data[] = {1, 2, 3, 4}; in ExampleMulInt8PerChannelQuantized() local
111 lhs.set_data(lhs_data); in ExampleMulInt8PerChannelQuantized()
130 const std::int8_t lhs_data[] = {1, 2, 3, 4}; in ExampleMulInt8GetRawAccumulators() local
136 lhs.set_data(lhs_data); in ExampleMulInt8GetRawAccumulators()
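
For context, a condensed version of how these example.cc functions wire lhs_data into ruy, closely following ExampleMulFloat (see the file itself for the quantized variants):

#include "ruy/ruy.h"

void MulFloat2x2() {
  ruy::Context context;
  const float lhs_data[] = {1, 2, 3, 4};
  const float rhs_data[] = {1, 2, 3, 4};
  float dst_data[4];

  ruy::Matrix<float> lhs;
  ruy::MakeSimpleLayout(2, 2, ruy::Order::kRowMajor, lhs.mutable_layout());
  lhs.set_data(lhs_data);
  ruy::Matrix<float> rhs;
  ruy::MakeSimpleLayout(2, 2, ruy::Order::kColMajor, rhs.mutable_layout());
  rhs.set_data(rhs_data);
  ruy::Matrix<float> dst;
  ruy::MakeSimpleLayout(2, 2, ruy::Order::kColMajor, dst.mutable_layout());
  dst.set_data(dst_data);

  ruy::MulParams<float, float> mul_params;
  ruy::Mul(lhs, rhs, mul_params, &context, &dst);
}
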
/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_gemm_x86.h
39 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Run()
50 quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data, in Run()
57 quantization_flavor>::Run(lhs_params, lhs_data, in Run()
68 static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
74 GemmImplUsingEigen::Run(lhs_params, lhs_data, rhs_params, rhs_data,
cpu_backend_gemm.h
116 void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Gemm() argument
144 quantization_flavor>::Run(lhs_params, lhs_data, in Gemm()
156 if (detail::CustomGemv(lhs_params, lhs_data, rhs_params, rhs_data, in Gemm()
163 quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data, in Gemm()
171 void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Gemm() argument
183 quantization_flavor>::Run(lhs_params, lhs_data, in Gemm()
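
A rough sketch of calling the Gemm entry point that cpu_backend_gemm.h and cpu_backend_gemm_x86.h dispatch through, for the float path; field names follow MatrixParams/GemmParams as used in the matches, and the context is assumed to be supplied by the caller:

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"

void GemmFloat2x2(tflite::CpuBackendContext* context) {
  using namespace tflite::cpu_backend_gemm;

  const float lhs_data[] = {1, 2, 3, 4};
  const float rhs_data[] = {1, 2, 3, 4};
  float dst_data[4];

  MatrixParams<float> lhs_params;
  lhs_params.order = Order::kRowMajor;  // lhs is expected row-major
  lhs_params.rows = 2;
  lhs_params.cols = 2;
  MatrixParams<float> rhs_params;
  rhs_params.order = Order::kColMajor;
  rhs_params.rows = 2;
  rhs_params.cols = 2;
  MatrixParams<float> dst_params;
  dst_params.order = Order::kColMajor;
  dst_params.rows = 2;
  dst_params.cols = 2;

  GemmParams<float, float> gemm_params;  // no bias, no clamping
  Gemm(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
       gemm_params, context);
}
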
cpu_backend_gemm_test.cc
254 const std::vector<LhsScalar>& lhs_data, in PerformGemmThenCompareResultsThenAgainWithClamping() argument
262 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
275 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
284 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in PerformGemmThenCompareResultsThenAgainWithClamping()
312 const std::vector<LhsScalar>& lhs_data, in BisectReasonableMultiplierExponent() argument
331 Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params, in BisectReasonableMultiplierExponent()
336 bisect_mid + 1, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data, in BisectReasonableMultiplierExponent()
340 bisect_min, bisect_mid, lhs_params, lhs_data, rhs_params, rhs_data, in BisectReasonableMultiplierExponent()
348 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in ReferenceGemm() argument
356 cpu_backend_gemm::detail::MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs); in ReferenceGemm()
[all …]
batch_matmul.cc
302 const TfLiteTensor* lhs_data; in Prepare() local
304 GetInputSafe(context, node, kInputLHSTensor, &lhs_data)); in Prepare()
314 if (lhs_data->type == kTfLiteInt8 || lhs_data->type == kTfLiteInt16) { in Prepare()
317 context, lhs_data, rhs_data, output, &real_multiplier)); in Prepare()
324 if (lhs_data->type == kTfLiteInt8) { in Prepare()
333 if (lhs_data->type == kTfLiteInt16) { in Prepare()
334 TF_LITE_ENSURE_EQ(context, lhs_data->params.zero_point, 0); in Prepare()
339 TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || in Prepare()
340 lhs_data->type == kTfLiteInt8 || in Prepare()
341 lhs_data->type == kTfLiteInt16); in Prepare()
[all …]
cpu_backend_gemm_gemmlowp.h
81 const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
89 gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
138 const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
149 gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
183 Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
cpu_backend_gemm_eigen.cc
38 const MatrixParams<float>& lhs_params, const float* lhs_data, in Run() argument
54 EigenMatrixMapRowMajorConst eigen_lhs(lhs_data, lhs_params.rows, in Run()
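
The Eigen backend wraps lhs_data in a no-copy matrix view. A small sketch of that mapping; the typedef mirrors the one in the match, and the reduction is just a placeholder expression:

#include "Eigen/Core"

using EigenMatrixMapRowMajorConst =
    Eigen::Map<const Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic,
                                   Eigen::RowMajor>>;

float SumOfFirstRow(const float* lhs_data, int rows, int cols) {
  EigenMatrixMapRowMajorConst eigen_lhs(lhs_data, rows, cols);
  return eigen_lhs.row(0).sum();
}
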
cpu_backend_gemm_custom_gemv.h
89 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in Run()
102 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in CustomGemvTask() argument
108 lhs_data_(lhs_data), in CustomGemvTask()
148 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data, in CustomGemv() argument
168 Impl::Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data, in CustomGemv()
180 tasks.emplace_back(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, in CustomGemv()
323 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
337 const LhsScalar* filter_ptr = lhs_data + row * lhs_params.cols;
635 static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
649 const float* filter_ptr = lhs_data + row * lhs_params.cols;
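
The custom GEMV matches show each output row reading one lhs row via lhs_data + row * lhs_params.cols. A scalar sketch of that inner structure; the threading, blocking, and SIMD of the real implementation are omitted:

void NaiveGemv(const float* lhs_data, int rows, int cols,
               const float* rhs_data, float* dst_data) {
  for (int row = 0; row < rows; ++row) {
    const float* filter_ptr = lhs_data + row * cols;  // one lhs row
    float accum = 0.0f;
    for (int depth = 0; depth < cols; ++depth) {
      accum += filter_ptr[depth] * rhs_data[depth];
    }
    dst_data[row] = accum;
  }
}
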
cpu_backend_gemm_eigen.h
29 static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
cpu_backend_gemm_ruy.h
126 const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
134 MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs, context->use_caching());
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
batch_matmul.h
53 inline void BatchMatMul(const RuntimeShape& lhs_shape, const float* lhs_data, in BatchMatMul() argument
81 const float* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); in BatchMatMul()
108 inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data, in BatchMatMul() argument
151 lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth); in BatchMatMul()
158 const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); in BatchMatMul()
200 const RuntimeShape& lhs_shape, const T* lhs_data, in BatchMatMul() argument
237 const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); in BatchMatMul()
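
In these reference kernels, lhs_data + (b0 * lhs_ext0) implements batch broadcasting: an extent is 0 when the corresponding lhs batch dimension has size 1, so every batch index reuses the same slice. A sketch of that indexing under that assumption; the real code derives the extents from RuntimeShape:

const float* LhsBatchSlice(const float* lhs_data, int b0, int lhs_batch0,
                           int slice_size) {
  const int lhs_ext0 = (lhs_batch0 == 1) ? 0 : slice_size;
  return lhs_data + (b0 * lhs_ext0);
}
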
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gemm_thunk.cc
64 se::DeviceMemoryBase lhs_data = get_device_address(lhs_buffer_); in ExecuteOnStream() local
67 return RunGemm(config_, lhs_data, rhs_data, output_data, params.stream, in ExecuteOnStream()
117 se::DeviceMemory<Element> lhs_data(lhs_matrix.data); in DoGemmWithAlgorithm() local
135 /*alpha=*/static_cast<Element>(alpha), lhs_data, in DoGemmWithAlgorithm()
152 /*alpha=*/alpha, lhs_data, in DoGemmWithAlgorithm()
165 lhs_data, /*leading dim of LHS=*/lhs_matrix.num_rows, rhs_data, in DoGemmWithAlgorithm()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
batch_matmul.h
28 inline void BatchMatMul(const RuntimeShape& lhs_shape, const float* lhs_data, in BatchMatMul() argument
96 const float* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); in BatchMatMul()
115 inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data, in BatchMatMul() argument
184 lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth); in BatchMatMul()
206 const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); in BatchMatMul()
272 const RuntimeShape& lhs_shape, const int8_t* lhs_data, in BatchMatMul() argument
353 const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); in BatchMatMul()
/external/tensorflow/tensorflow/core/kernels/
matmul_op_test.cc
36 std::function<void(const Tensor& lhs_data, const Tensor& rhs_data,
100 void RunMatMulWithBias(const Tensor& lhs_data, const Tensor& rhs_data, in RunMatMulWithBias() argument
108 ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)), in RunMatMulWithBias()
120 const Tensor& lhs_data, const Tensor& rhs_data, const Tensor& bias_data, in RunMatMulWithBiasAndActivation() argument
127 ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)), in RunMatMulWithBiasAndActivation()
150 void RunFusedMatMulOp(const Tensor& lhs_data, const Tensor& rhs_data, in RunFusedMatMulOp() argument
161 ops::Const(root.WithOpName("lhs"), Input::Initializer(lhs_data)); in RunFusedMatMulOp()
/external/tensorflow/tensorflow/core/kernels/linalg/
tridiagonal_solve_op_gpu.cu.cc
297 const Scalar* lhs_data = lhs_transposed.flat<Scalar>().data(); in ComputeWithGtsvBatched() local
298 const Scalar* superdiag = lhs_data; in ComputeWithGtsvBatched()
299 const Scalar* diag = lhs_data + matrix_size * batch_size; in ComputeWithGtsvBatched()
300 const Scalar* subdiag = lhs_data + 2 * matrix_size * batch_size; in ComputeWithGtsvBatched()
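
These matches spell out the packed layout directly: the transposed lhs stores the three diagonals back to back, so all three pointers are fixed offsets into lhs_data. A sketch using float in place of the Scalar template parameter:

struct TridiagonalView {
  const float* superdiag;
  const float* diag;
  const float* subdiag;
};

TridiagonalView MakeView(const float* lhs_data, int matrix_size,
                         int batch_size) {
  TridiagonalView v;
  v.superdiag = lhs_data;                               // first block
  v.diag = lhs_data + matrix_size * batch_size;         // second block
  v.subdiag = lhs_data + 2 * matrix_size * batch_size;  // third block
  return v;
}
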
/external/llvm-project/lldb/examples/python/
mach_o.py
920 lhs_data = lhs_section.get_contents(self)
922 if lhs_data and rhs_data:
923 if lhs_data == rhs_data:
926 lhs_data_len = len(lhs_data)
947 elif lhs_data and not rhs_data:
952 elif not lhs_data and rhs_data:
/external/tensorflow/tensorflow/lite/toco/
tooling_util.cc
778 const auto& lhs_data = lhs_array.GetBuffer<A>().data; in CompareArrayBuffers() local
780 CHECK_EQ(lhs_data.size(), rhs_data.size()) in CompareArrayBuffers()
782 for (int i = 0; i < lhs_data.size(); ++i) { in CompareArrayBuffers()
783 if (lhs_data[i] != rhs_data[i]) { in CompareArrayBuffers()
/external/tensorflow/tensorflow/compiler/xla/tests/
array_elementwise_ops_test.cc
263 std::unique_ptr<GlobalData> lhs_data = in XLA_TEST_F() local
287 ComputeAndCompareR1<uint64>(&b, expected, {lhs_data.get(), rhs_data.get()}); in XLA_TEST_F()
303 std::unique_ptr<GlobalData> lhs_data = in XLA_TEST_F() local
326 ComputeAndCompareR1<int64>(&b, expected, {lhs_data.get(), rhs_data.get()}); in XLA_TEST_F()
/external/llvm-project/openmp/runtime/src/
kmp.h
3827 void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3833 void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
3842 void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
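
The kmp.h declarations take a reduce_func callback that combines two partial reduction payloads. A minimal sketch of a compatible callback, assuming an int payload (real callers define their own data layout):

void SumReduceFunc(void* lhs_data, void* rhs_data) {
  // Fold the right-hand task's partial result into the left-hand one.
  *static_cast<int*>(lhs_data) += *static_cast<int*>(rhs_data);
}
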
