
Searched defs:n_batch (Results 1 – 25 of 29) sorted by relevance

/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
sse_tensor_utils.h
39 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate()
52 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
61 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
72 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
82 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4()
91 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in SparseMatrixBatchVectorMultiplyAccumulate1x16()
104 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
113 const float* __restrict__ scaling_factors, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
122 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
132 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
[all …]
neon_tensor_utils.h
29 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate()
38 int n_batch, in MatrixBatchVectorMultiplyAccumulate()
48 int n_batch, int32_t* scratch, in MatrixBatchVectorMultiplyAccumulate()
58 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
69 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4()
76 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
86 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in SparseMatrixBatchVectorMultiplyAccumulate1x16()
100 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate()
108 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
118 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
[all …]
sse_tensor_utils.cc
161 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in Avx2MatrixBatchVectorMultiplyAccumulateImpl()
202 const float* __restrict__ scaling_factors, int n_batch, in Avx2MatrixBatchVectorMultiplyAccumulateImpl()
294 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulateImpl()
379 const int8_t* input_to_gate_weights, int32_t n_batch, in SseCpuBackendGemm()
413 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate()
424 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch, in SseMatrixBatchVectorMultiplyAccumulate()
475 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate()
597 const float* __restrict__ scaling_factors, int n_batch, in SseSparseMatrixBatchVectorMultiplyAccumulate()
neon_tensor_utils.cc
227 int n_batch, float* result) { in NeonMatrixBatchVectorMultiplyAccumulate()
276 const int8_t* ShuffleVectors(const int8_t* vectors, const int n_batch, in ShuffleVectors()
331 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
459 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
589 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate()
622 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
694 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate()
705 const float* scaling_factors, int n_batch, float* __restrict__ result) { in DotprodSparseMatrixBatchVectorMultiplyAccumulate()
769 int32_t n_batch, int32_t n_input, in NeonMatrixBatchVectorMultiplyImpl()
879 int32_t multiplier, int32_t shift, int32_t n_batch, int32_t n_output, in NeonMatrixBatchVectorAccumulateImpl()
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h
60 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate()
69 int n_batch, in MatrixBatchVectorMultiplyAccumulate()
78 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
91 int n_batch, int32_t* scratch, in MatrixBatchVectorMultiplyAccumulate()
101 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4()
108 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in SparseMatrixBatchVectorMultiplyAccumulate()
118 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in SparseMatrixBatchVectorMultiplyAccumulate1x16()
132 const float* scaling_factors, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate()
141 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
151 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in MatrixBatchVectorMultiplyAccumulate()
[all …]
portable_tensor_utils.cc
122 int n_batch, float* result) { in PortableMatrixBatchVectorMultiplyAccumulate()
141 int n_batch, float* __restrict__ result) { in PortableMatrixBatchVectorMultiplyAccumulate()
166 int n_batch, float* __restrict__ result, const float* per_channel_scale, in PortableMatrixBatchVectorMultiplyAccumulate()
209 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate1x4()
234 int n_batch, const int32_t input_offset, const int32_t output_multiplier, in PortableSparseMatrixBatchVectorMultiplyAccumulate1x16()
267 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in PortableSparseMatrixBatchVectorMultiplyAccumulate()
297 const float* scaling_factors, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate()
331 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulateImpl()
360 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulate()
370 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, in PortableMatrixBatchVectorMultiplyAccumulate()
[all …]
svdf.h
77 const int n_batch = input_shape.Dims(0); in EvalIntegerSVDF() local
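The optimized and reference hits above share one contract: n_batch input vectors of length m_cols are multiplied against an m_rows x m_cols matrix, and the products are accumulated into n_batch output rows of length m_rows. The sketch below is an illustrative, unoptimized rendering of that pattern only; the function name and the layout assumptions (row-major matrix, vectors and results packed contiguously per batch) are ours, not copied from the TFLite sources.

// Illustrative sketch, not the TFLite implementation: batched matrix * vector
// multiply-accumulate. Assumes a row-major matrix of shape [m_rows, m_cols];
// the n_batch input vectors and output rows are packed contiguously.
void MatrixBatchVectorMultiplyAccumulateSketch(const float* matrix, int m_rows,
                                               int m_cols, const float* vectors,
                                               int n_batch, float* result) {
  for (int b = 0; b < n_batch; ++b) {
    const float* vector = vectors + b * m_cols;  // input vector for batch b
    float* out = result + b * m_rows;            // output row for batch b
    for (int r = 0; r < m_rows; ++r) {
      const float* row = matrix + r * m_cols;
      float acc = 0.0f;
      for (int c = 0; c < m_cols; ++c) {
        acc += row[c] * vector[c];
      }
      out[r] += acc;  // accumulate into result, do not overwrite it
    }
  }
}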
/external/tensorflow/tensorflow/lite/kernels/internal/
portable_tensor_utils.h
51 void VectorBatchVectorAdd(const T* vector, int v_size, int n_batch, in VectorBatchVectorAdd()
73 const T* batch_vector, int n_batch, in VectorBatchVectorCwiseProduct()
101 int n_batch, T* result) { in VectorBatchVectorCwiseProductAccumulate()
112 void VectorBatchVectorAssign(const T* vector, int v_size, int n_batch, in VectorBatchVectorAssign()
153 inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch, in BatchQuantizeFloats()
419 int v_size, int n_batch, in BatchVectorBatchVectorDotProduct()
tensor_utils.h
63 const float* vector_scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
lstm_full_test.cc
35 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
265 const int n_batch = 1; in TEST_P() local
339 const int n_batch = 1; in TEST_P() local
407 const int n_batch = 1; in TEST_P() local
970 const int n_batch = 1; in TEST_F() local
1056 const int n_batch = 1; in TEST_F() local
1120 const int n_batch = 1; in TEST_F() local
/external/tensorflow/tensorflow/lite/kernels/
unidirectional_sequence_lstm_test.cc
36 int n_batch, int n_input, int n_cell, int n_output, int sequence_length, in HybridUnidirectionalLSTMOpModel()
248 const int n_batch = 1; in TEST_F() local
310 const int n_batch = 1; in TEST_F() local
377 const int n_batch = 1; in TEST_P() local
440 const int n_batch = 1; in TEST_P() local
551 const int n_batch = 1; in TEST_F() local
613 const int n_batch = 1; in TEST_P() local
675 const int n_batch = 1; in TEST_P() local
1338 const int n_batch = 2; in TEST_F() local
1405 const int n_batch = 2; in TEST_P() local
[all …]
bidirectional_sequence_lstm_test.cc
32 BidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in BidirectionalLSTMOpModel()
430 const int n_batch = 1; in TEST_P() local
596 const int n_batch = 2; in TEST_P() local
762 const int n_batch = 1; in TEST() local
923 const int n_batch = 1; in TEST() local
1074 const int n_batch = 1; in TEST() local
1226 const int n_batch = 2; in TEST() local
1929 const int n_batch = 2; in TEST() local
2630 const int n_batch = 1; in TEST_P() local
2804 const int n_batch = 1; in TEST_P() local
unidirectional_sequence_gru_test.cc
34 explicit GRUOpModel(int n_batch, int n_input, int n_output, in GRUOpModel()
101 const int n_batch = 2; in TEST() local
lstm_eval.cc
47 float* output, int m_rows, int m_cols, int n_batch, in MatrixBatchVectorMultiplyAccumulate()
187 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat()
255 void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state, in UpdateLstmCellFloat()
302 void CalculateLstmOutputFloat(int n_batch, int n_cell, int n_output, in CalculateLstmOutputFloat()
363 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateHybrid()
475 int n_batch, int n_cell, int n_output, const float* cell_state, in CalculateLstmOutputHybrid()
548 const int n_batch, const int n_input, const int n_output, const int n_cell, in CalculateLstmGateInteger8x8_16()
613 void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state, in UpdateLstmCellInteger()
659 int n_batch, int n_cell, int n_output, const int16_t* cell_state, in CalculateLstmOutputInteger8x8_16()
713 const int n_batch, const int n_input, const int n_output, const int n_cell, in CalculateLstmGateInteger8x8_8()
[all …]
lstm_test.cc
39 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
413 const int n_batch = 1; in TEST_P() local
490 const int n_batch = 1; in TEST_P() local
561 const int n_batch = 2; in TEST_P() local
1144 const int n_batch = 2; in TEST_P() local
1239 const int n_batch = 2; in TEST_P() local
1314 LSTMIntegerOpModel(int n_batch, int n_input, int n_cell, int n_output, in LSTMIntegerOpModel()
1598 const int n_batch = 2; in TEST() local
1760 const int n_batch = 2; in TEST() local
1933 const int n_batch = 2; in TEST() local
[all …]
optional_tensor_test.cc
31 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
229 const int n_batch = 1; in TEST() local
unidirectional_sequence_gru.cc
40 const int n_batch = input->dims->data[1]; in GruImpl() local
142 const int n_batch = input->dims->data[1]; in Prepare() local
gru_cell.cc
45 const int n_batch = input_shape.Dims(0); in GruCell() local
unidirectional_sequence_lstm_test_util.h
39 : n_batch_(n_batch), in n_batch_() argument
unidirectional_sequence_lstm.cc
895 const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0]; in Prepare() local
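As the unidirectional_sequence_gru.cc, gru_cell.cc, and unidirectional_sequence_lstm.cc hits show, these kernels read n_batch straight off the input shape: dimension 0 for a single-step [n_batch, n_input] input, dimension 1 for a time-major [max_time, n_batch, n_input] sequence, and dimension 0 again when the sequence is batch-major. A minimal standalone sketch of that selection follows; the helper name and the plain std::array shape are illustrative assumptions, not the TfLiteTensor API.

#include <array>
#include <cassert>

// Hypothetical helper, not part of TFLite: pick n_batch out of a 3-D sequence
// input shape. Time-major input is {max_time, n_batch, n_input}; batch-major
// input is {n_batch, max_time, n_input}.
int BatchSizeFromSequenceShape(const std::array<int, 3>& dims, bool time_major) {
  return time_major ? dims[1] : dims[0];
}

int main() {
  const std::array<int, 3> time_major_dims = {16, 2, 8};   // {max_time, n_batch, n_input}
  const std::array<int, 3> batch_major_dims = {2, 16, 8};  // {n_batch, max_time, n_input}
  assert(BatchSizeFromSequenceShape(time_major_dims, /*time_major=*/true) == 2);
  assert(BatchSizeFromSequenceShape(batch_major_dims, /*time_major=*/false) == 2);
  return 0;
}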
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/
lstm.cc
46 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat()
100 void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state, in UpdateLstmCellFloat()
124 int n_batch, int n_cell, int n_output, const float* cell_state, in CalculateLstmOutputCalibration()
181 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepCalibration()
294 int max_time, n_batch; in EvalCalibration() local
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/custom_logging_ops/
lstm.cc
63 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepWithAuxInput()
371 int max_time, n_batch; in EvalFloat() local
/external/armnn/src/backends/reference/workloads/
LstmUtils.cpp
43 uint32_t n_batch, in MeanStddevNormalization()
/external/tensorflow/tensorflow/lite/delegates/nnapi/
nnapi_delegate_test.cc
3169 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel()
3532 const int n_batch = 1; in TEST_F() local
3596 const int n_batch = 1; in TEST_F() local
3708 const int n_batch = 1; in TEST_F() local
4367 const int n_batch = 2; in TEST_F() local
4500 const int n_batch = 2; in TEST_F() local
4641 const int n_batch = 2; in TEST_F() local
/external/deqp/external/openglcts/modules/gl/
gl4cVertexAttrib64BitTest.cpp
3554 for (unsigned int n_batch = 0; n_batch < m_n_batches; ++n_batch) in initBuffers() local
3883 for (unsigned int n_batch = 0; n_batch < m_n_batches; ++n_batch) in verifyXFBData() local
