
Searched refs:result_stride (Results 1 – 16 of 16) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h:44 int result_stride);
49 int n_batch, float* __restrict__ result, int result_stride);
53 const float* vector, int n_batch, float* result, int result_stride);
59 int result_stride);
80 int result_stride);
164 int result_stride) { in MatrixBatchVectorMultiplyAccumulate() argument
166 n_batch, result, result_stride); in MatrixBatchVectorMultiplyAccumulate()
172 int n_batch, float* __restrict__ result, int result_stride) { in MatrixBatchVectorMultiplyAccumulate() argument
175 result_stride); in MatrixBatchVectorMultiplyAccumulate()
180 const float* vector, int n_batch, float* result, int result_stride) { in SparseMatrixBatchVectorMultiplyAccumulate() argument
[all …]
portable_tensor_utils.cc:72 int result_stride) { in PortableMatrixBatchVectorMultiplyAccumulate() argument
83 result_in_batch += result_stride; in PortableMatrixBatchVectorMultiplyAccumulate()
91 int n_batch, float* __restrict__ result, int result_stride) { in PortableMatrixBatchVectorMultiplyAccumulate() argument
97 for (row = 0; row < m_rows; ++row, result += result_stride) { in PortableMatrixBatchVectorMultiplyAccumulate()
115 const float* vector, int n_batch, float* result, int result_stride) { in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
138 result_in_batch += result_stride; in PortableSparseMatrixBatchVectorMultiplyAccumulate()
147 int result_stride) { in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
157 for (row = 0; row < m_rows; ++row, result += result_stride) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
200 int result_stride) { in PortableBatchVectorBatchVectorDotProduct() argument
209 result_ptr += result_stride; in PortableBatchVectorBatchVectorDotProduct()
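Pieced together from the portable_tensor_utils.cc matches above (file lines 72-97), this is a minimal sketch of how the portable kernels consume result_stride: each batch writes m_rows accumulators spaced result_stride floats apart, so callers can interleave the outputs of several kernels in one buffer. The body below is illustrative, not the actual TFLite implementation.

    void PortableMatrixBatchVectorMultiplyAccumulate(
        const float* matrix, int m_rows, int m_cols, const float* vector,
        int n_batch, float* result, int result_stride) {
      for (int b = 0; b < n_batch; ++b) {
        // Outputs for batch b start at b * m_rows * result_stride and sit
        // result_stride apart (cf. neon_tensor_utils.cc line 108 below).
        float* result_in_batch = result + b * m_rows * result_stride;
        const float* vector_in_batch = vector + b * m_cols;
        const float* matrix_row = matrix;
        for (int r = 0; r < m_rows; ++r) {
          float dot = 0.0f;
          for (int c = 0; c < m_cols; ++c) {
            dot += matrix_row[c] * vector_in_batch[c];
          }
          *result_in_batch += dot;           // accumulate, don't overwrite
          result_in_batch += result_stride;  // step to the next strided slot
          matrix_row += m_cols;
        }
      }
    }
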
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
tensor_utils_impl.h:41 int result_stride);
45 int result_stride);
51 int n_batch, float* __restrict__ result, int result_stride);
55 int n_batch, float* __restrict__ result, int result_stride);
59 const float* vector, int n_batch, float* result, int result_stride);
62 const float* vector, int n_batch, float* result, int result_stride);
69 int result_stride);
74 int result_stride);
102 int result_stride);
106 int result_stride);
neon_tensor_utils.h:30 int result_stride) { in MatrixBatchVectorMultiplyAccumulate() argument
32 vector, n_batch, result, result_stride); in MatrixBatchVectorMultiplyAccumulate()
38 int n_batch, float* __restrict__ result, int result_stride) { in MatrixBatchVectorMultiplyAccumulate() argument
40 vectors, scaling_factors, n_batch, result, result_stride); in MatrixBatchVectorMultiplyAccumulate()
46 int result_stride) { in SparseMatrixBatchVectorMultiplyAccumulate() argument
48 matrix, ledger, m_rows, m_cols, vector, n_batch, result, result_stride); in SparseMatrixBatchVectorMultiplyAccumulate()
55 int result_stride) { in SparseMatrixBatchVectorMultiplyAccumulate() argument
58 n_batch, result, result_stride); in SparseMatrixBatchVectorMultiplyAccumulate()
95 int result_stride) { in BatchVectorBatchVectorDotProduct() argument
97 n_batch, result, result_stride); in BatchVectorBatchVectorDotProduct()
neon_tensor_utils.cc:100 int result_stride) { in NeonMatrixBatchVectorMultiplyAccumulate() argument
108 float* result_in_batch = result + b * m_rows * result_stride; in NeonMatrixBatchVectorMultiplyAccumulate()
131 result_in_batch += result_stride; in NeonMatrixBatchVectorMultiplyAccumulate()
317 int result_stride) { in DotprodSparseMatrixBatchVectorMultiplyAccumulate() argument
371 result[(batch * m_rows + row) * result_stride] += in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
382 int n_batch, float* __restrict__ result, int result_stride) { in NeonMatrixBatchVectorMultiplyAccumulate() argument
386 if (n_batch % 4 == 0 && result_stride == 1) { in NeonMatrixBatchVectorMultiplyAccumulate()
432 for (row = 0; row < m_rows; ++row, result += result_stride) { in NeonMatrixBatchVectorMultiplyAccumulate()
512 const float* vector, int n_batch, float* result, int result_stride) { in NeonSparseMatrixBatchVectorMultiplyAccumulate() argument
548 result_in_batch += result_stride; in NeonSparseMatrixBatchVectorMultiplyAccumulate()
[all …]
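The neon_tensor_utils.cc match at file line 386 shows the usual dispatch pattern around result_stride: the vectorized path assumes a contiguous output, so it is only taken when result_stride == 1 (and the batch count divides evenly), with a strided scalar loop as the fallback. A sketch of that dispatch, where FastContiguousKernel and StridedFallback are placeholder names, not functions from the library:

    #include <cstdint>

    // Placeholder kernels, declared only so the sketch is complete.
    void FastContiguousKernel(const int8_t* matrix, int m_rows, int m_cols,
                              const int8_t* vectors,
                              const float* scaling_factors, int n_batch,
                              float* result);
    void StridedFallback(const int8_t* matrix, int m_rows, int m_cols,
                         const int8_t* vectors, const float* scaling_factors,
                         int n_batch, float* result, int result_stride);

    void MatrixBatchVectorMultiplyAccumulate(
        const int8_t* matrix, int m_rows, int m_cols, const int8_t* vectors,
        const float* scaling_factors, int n_batch, float* result,
        int result_stride) {
      if (n_batch % 4 == 0 && result_stride == 1) {
        // Fast path: stores results contiguously, no stride bookkeeping.
        FastContiguousKernel(matrix, m_rows, m_cols, vectors, scaling_factors,
                             n_batch, result);
      } else {
        // Generic path: advances result by result_stride per row (line 432).
        StridedFallback(matrix, m_rows, m_cols, vectors, scaling_factors,
                        n_batch, result, result_stride);
      }
    }
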
/external/gemmlowp/meta/
legacy_multi_thread_common.h:49 std::int32_t result_stride; member
54 std::int32_t result_stride, const F& operation) in MetaTask()
61 result_stride(result_stride), in MetaTask()
68 result + task_rect.m_offset * result_stride + task_rect.n_offset; in Run()
70 task_rect.n, k, task_result, result_stride); in Run()
120 OUT_TYPE* result, std::int32_t result_stride, in MultiThreadedMatrixMatrix() argument
129 result_stride); in MultiThreadedMatrixMatrix()
138 [&tasks, &task_scratch, lhs, rhs, k, result, result_stride, operation, in MultiThreadedMatrixMatrix()
141 task_scratch, lhs, rhs, rect, k, result, result_stride, operation)); in MultiThreadedMatrixMatrix()
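legacy_multi_thread_common.h shows the stride's role in work splitting: each task owns a rectangle of the shared result matrix, and its origin is computed from result_stride (file line 68). A self-contained sketch of that addressing, with TaskRect as a hypothetical stand-in for the task rectangle type:

    #include <cstdint>

    struct TaskRect {
      std::int32_t m_offset;  // first row of this task's tile
      std::int32_t n_offset;  // first column of this task's tile
      std::int32_t m, n;      // tile dimensions
    };

    // Rows of the result matrix are result_stride elements apart, so a
    // task's tile begins at result + m_offset * result_stride + n_offset.
    template <typename OutType>
    OutType* TileOrigin(OutType* result, std::int32_t result_stride,
                        const TaskRect& rect) {
      return result + rect.m_offset * result_stride + rect.n_offset;
    }
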
legacy_multi_thread_gemm.h:37 std::int32_t result_stride, const F& operation) { in CacheFriendlyMatrixMatrix() argument
47 result + i * optimal_n, result_stride); in CacheFriendlyMatrixMatrix()
52 result + chunks_count_less_one * optimal_n, result_stride); in CacheFriendlyMatrixMatrix()
55 result, result_stride); in CacheFriendlyMatrixMatrix()
70 std::int32_t result_stride) const { in ExecuteMatrixMatrix() argument
71 CacheFriendlyMatrixMatrix(scratch, lhs, rhs, m, n, k, result, result_stride, in ExecuteMatrixMatrix()
80 std::int32_t result_stride) const { in ExecuteCacheFriendlyMatrixMatrix() argument
82 sum_offset, multiplier, shift, result, result_stride); in ExecuteCacheFriendlyMatrixMatrix()
100 std::int32_t result_stride) const { in ExecuteMatrixMatrix() argument
101 CacheFriendlyMatrixMatrix(scratch, lhs, rhs, m, n, k, result, result_stride, in ExecuteMatrixMatrix()
[all …]
legacy_single_thread_gemm.h:34 std::uint8_t* result, std::int32_t result_stride) { in gemm_q8_strided() argument
69 params.fused_kernel.output_stream.stride = result_stride; in gemm_q8_strided()
126 std::int32_t result_stride) { in gemm_i32_strided() argument
158 params.fused_kernel.output_stream.stride = result_stride * 4; in gemm_i32_strided()
211 std::int32_t result_stride) { in gemm_f_strided() argument
243 params.fused_kernel.output_stream.stride = result_stride * 4; in gemm_f_strided()
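Note the unit change across these three entry points: gemm_q8_strided passes result_stride straight through (file line 69), while gemm_i32_strided and gemm_f_strided multiply it by 4 (file lines 158, 243). That is consistent with the output stream stride being measured in bytes while result_stride counts elements. A hypothetical helper capturing that convention:

    #include <cstdint>

    // Assumed convention: result_stride counts elements, the fused kernel's
    // output_stream.stride counts bytes (x1 for uint8, x4 for int32/float).
    template <typename OutType>
    std::int32_t OutputStreamStrideBytes(std::int32_t result_stride) {
      return result_stride * static_cast<std::int32_t>(sizeof(OutType));
    }

    // OutputStreamStrideBytes<std::uint8_t>(s) == s
    // OutputStreamStrideBytes<float>(s)        == s * 4
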
legacy_multi_thread_gemv.h:42 std::int32_t result_stride) const { in ExecuteMatrixMatrix() argument
62 std::int32_t result_stride) const { in ExecuteMatrixMatrix() argument
81 std::int32_t result_stride) const { in ExecuteMatrixMatrix() argument
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils.h:56 int result_stride);
71 const float* vector, int n_batch, float* result, int result_stride);
83 int n_batch, float* __restrict__ result, int result_stride);
100 int result_stride);
134 int result_stride);
/external/gemmlowp/meta/generators/
mul_Nx8_Mx8_neon.py:259 result_stride): argument
263 result_stride)
267 result_stride)
273 0), emitter.Dereference(result_address, None), result_stride)
278 emitter.Dereference(result_address, None), result_stride)
/external/gemmlowp/eight_bit_int_gemm/
eight_bit_int_gemm.cc:258 std::int32_t result_stride, std::uint8_t* result) { in MetaGemmQuantized8Bit() argument
261 if (IsRowMajorOrVector(result_transpose, result_stride, m, n)) { in MetaGemmQuantized8Bit()
282 std::int32_t result_stride, float* result) { in MetaGemmFloat() argument
285 if (IsRowMajorOrVector(result_transpose, result_stride, m, n)) { in MetaGemmFloat()
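Here result_stride feeds a layout check rather than a kernel: the meta path is only taken when IsRowMajorOrVector(...) approves the result layout (file lines 261, 285). A speculative reconstruction of that predicate, assuming "row-major" means not transposed with the stride equal to the row width, and that vectors are accepted regardless of layout:

    #include <cstdint>

    // Assumed logic, not the verified gemmlowp implementation.
    bool IsRowMajorOrVector(bool transpose, std::int32_t stride,
                            std::int32_t rows, std::int32_t cols) {
      const bool is_vector = (rows == 1 || cols == 1);
      return is_vector || (!transpose && stride == cols);
    }
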
/external/libaom/libaom/aom_dsp/
noise_model.c:1296 float *result, int result_stride, INT_TYPE *denoised, int w, int h, \
1302 (y + (block_size >> chroma_sub_h)) * result_stride + x + \
1315 result[result_idx + result_stride - 1] += err * 3.0f / 16.0f; \
1317 result[result_idx + result_stride] += err * 5.0f / 16.0f; \
1319 result[result_idx + result_stride + 1] += err * 1.0f / 16.0f; \
1340 const int result_stride = (num_blocks_w + 2) * block_size; in aom_wiener_denoise_2d() local
1355 result = (float *)aom_malloc((num_blocks_h + 2) * block_size * result_stride * in aom_wiener_denoise_2d()
1391 memset(result, 0, sizeof(*result) * result_stride * result_height); in aom_wiener_denoise_2d()
1426 result[y_result * result_stride + x_result] += in aom_wiener_denoise_2d()
1437 dither_and_quantize_highbd(result, result_stride, (uint16_t *)denoised[c], in aom_wiener_denoise_2d()
[all …]
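The noise_model.c macro (file lines 1315-1319) is Floyd-Steinberg-style error diffusion over a padded result plane of width result_stride: idx + result_stride addresses the pixel directly below idx. The visible terms are the below-left (3/16), below (5/16), and below-right (1/16) taps; a sketch assuming the standard 7/16 right-neighbor tap and ignoring the boundary handling the real macro performs:

    // Spread the quantization error of pixel `idx` onto its unprocessed
    // neighbors in a row-major plane whose rows are result_stride apart.
    void DiffuseError(float* result, int result_stride, int idx, float err) {
      result[idx + 1]                 += err * 7.0f / 16.0f;  // right (assumed)
      result[idx + result_stride - 1] += err * 3.0f / 16.0f;  // below-left
      result[idx + result_stride]     += err * 5.0f / 16.0f;  // below
      result[idx + result_stride + 1] += err * 1.0f / 16.0f;  // below-right
    }
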
/external/mesa3d/src/gallium/drivers/radeon/
r600_query.c:1649 uint32_t result_stride; in r600_query_hw_get_result_resource() member
1675 consts.result_stride = query->result_size; in r600_query_hw_get_result_resource()
/external/mesa3d/src/gallium/drivers/r600/
r600_query.c:1608 uint32_t result_stride; in r600_query_hw_get_result_resource() member
1634 consts.result_stride = query->result_size; in r600_query_hw_get_result_resource()
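In the radeon/r600 drivers, result_stride is not a CPU loop increment at all: it is uploaded as a shader constant so a result-packing compute pass can locate each hardware query result inside the buffer. A hypothetical view of the pattern, with only the member and assignment shown above taken from the source:

    #include <cstdint>

    struct QueryConsts {
      uint32_t result_stride;  // bytes per hardware query result slot
      // ...other packing parameters elided...
    };

    // consts.result_stride = query->result_size;  (lines 1675 / 1634 above)
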
/external/deqp/external/openglcts/modules/gl/
gl4cGPUShaderFP64Tests.cpp:15144 const glw::GLuint result_stride = function_object.getResultStride(); in prepareTestData() local
15145 const glw::GLuint result_buffer_size = result_stride * m_n_veritces; in prepareTestData()
15157 const glw::GLuint result_offset = vertex * result_stride; in prepareTestData()
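The deqp test uses result_stride as a per-vertex record size: the capture buffer holds result_stride * m_n_veritces bytes (file line 15145, identifier typo as in the source) and vertex i's record starts at i * result_stride (file line 15157). A minimal sketch of walking that layout, with hypothetical names:

    // One result_stride-byte record per vertex, packed back to back.
    void ForEachVertexRecord(unsigned result_stride, unsigned vertex_count,
                             const unsigned char* buffer) {
      for (unsigned vertex = 0; vertex < vertex_count; ++vertex) {
        const unsigned char* record = buffer + vertex * result_stride;
        // ...verify result_stride bytes at `record` for this vertex...
        (void)record;
      }
    }
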