
Searched refs:n_batch (Results 1 – 25 of 33) sorted by relevance

/external/tensorflow/tensorflow/lite/tools/optimize/calibration/custom_logging_ops/
lstm.cc
63 const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, in LstmStepWithAuxInput() argument
80 std::fill_n(input_gate_scratch, n_cell * n_batch, 0.0f); in LstmStepWithAuxInput()
82 std::fill_n(forget_gate_scratch, n_cell * n_batch, 0.0f); in LstmStepWithAuxInput()
83 std::fill_n(cell_scratch, n_cell * n_batch, 0.0f); in LstmStepWithAuxInput()
84 std::fill_n(output_gate_scratch, n_cell * n_batch, 0.0f); in LstmStepWithAuxInput()
88 n_batch, input_gate_scratch); in LstmStepWithAuxInput()
90 tensor_utils::VectorBatchVectorAssign(forget_gate_bias_ptr, n_cell, n_batch, in LstmStepWithAuxInput()
92 tensor_utils::VectorBatchVectorAssign(cell_bias_ptr, n_cell, n_batch, in LstmStepWithAuxInput()
94 tensor_utils::VectorBatchVectorAssign(output_gate_bias_ptr, n_cell, n_batch, in LstmStepWithAuxInput()
101 input_to_input_weights_ptr, n_cell, n_input, input_ptr, n_batch, in LstmStepWithAuxInput()
[all …]
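
In these LSTM hits, every gate scratch buffer holds n_cell values per batch entry, so the buffers are sized n_cell * n_batch: std::fill_n zeroes them and VectorBatchVectorAssign broadcasts an n_cell gate bias across the batch before the weight contributions are accumulated. A minimal sketch of that broadcast, assuming a row-major [n_batch, n_cell] scratch layout (function and variable names here are illustrative, not TFLite symbols):

#include <algorithm>
#include <vector>

// Sketch: replicate an n_cell bias vector into an [n_batch, n_cell] scratch
// buffer, mirroring what VectorBatchVectorAssign does for the LSTM gates.
void BroadcastBiasAcrossBatch(const float* bias, int n_cell, int n_batch,
                              float* gate_scratch) {
  for (int b = 0; b < n_batch; ++b) {
    std::copy(bias, bias + n_cell, gate_scratch + b * n_cell);
  }
}

int main() {
  const int n_cell = 4, n_batch = 2;
  std::vector<float> bias = {0.1f, 0.2f, 0.3f, 0.4f};
  std::vector<float> gate(n_cell * n_batch, 0.0f);
  BroadcastBiasAcrossBatch(bias.data(), n_cell, n_batch, gate.data());
  return 0;  // gate now holds the bias once per batch entry
}
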
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils.h
60 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate() argument
62 n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
69 int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
72 scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
78 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate() argument
82 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in MatrixBatchVectorMultiplyAccumulate()
91 int n_batch, int32_t* scratch, in MatrixBatchVectorMultiplyAccumulate() argument
95 scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
101 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4() argument
103 matrix, segments, indices, m_rows, m_cols, vector, n_batch, result); in SparseMatrixBatchVectorMultiplyAccumulate1x4()
[all …]
portable_tensor_utils_impl.h
60 int n_batch, float* result);
65 int n_batch, float* __restrict__ result);
70 int n_batch, float* __restrict__ result, const float* per_channel_scale,
77 int n_batch, int32_t* scratch, float* __restrict__ result,
83 const float* __restrict__ vector, int n_batch, float* __restrict__ result);
87 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
93 const float* scaling_factors, int n_batch, float* __restrict__ result);
101 int v_size, int n_batch,
105 const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
111 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
[all …]
portable_tensor_utils.cc
122 int n_batch, float* result) { in PortableMatrixBatchVectorMultiplyAccumulate() argument
124 for (int b = 0; b < n_batch; b++) { in PortableMatrixBatchVectorMultiplyAccumulate()
141 int n_batch, float* __restrict__ result) { in PortableMatrixBatchVectorMultiplyAccumulate() argument
142 for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { in PortableMatrixBatchVectorMultiplyAccumulate()
166 int n_batch, float* __restrict__ result, const float* per_channel_scale, in PortableMatrixBatchVectorMultiplyAccumulate() argument
171 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result); in PortableMatrixBatchVectorMultiplyAccumulate()
181 for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { in PortableMatrixBatchVectorMultiplyAccumulate()
209 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in PortableSparseMatrixBatchVectorMultiplyAccumulate1x4() argument
212 for (int batch = 0; batch < n_batch; batch++) { in PortableSparseMatrixBatchVectorMultiplyAccumulate1x4()
232 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, in PortableSparseMatrixBatchVectorMultiplyAccumulate() argument
[all …]
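
The portable reference loop above spells out the contract every backend implements: for each of the n_batch input vectors, accumulate the product of an [m_rows, m_cols] matrix with an m_cols-long vector into an [n_batch, m_rows] result. A minimal float sketch of that contract (layout assumptions: row-major matrix, batch-major vectors and result; the name is illustrative):

// Sketch of a batched matrix * vector multiply-accumulate:
//   result[b][r] += dot(matrix_row_r, vectors[b]) for every batch b.
// matrix:  m_rows x m_cols, row-major
// vectors: n_batch x m_cols, batch-major
// result:  n_batch x m_rows, accumulated in place (not overwritten)
void MatrixBatchVectorMultiplyAccumulateSketch(const float* matrix, int m_rows,
                                               int m_cols, const float* vectors,
                                               int n_batch, float* result) {
  for (int b = 0; b < n_batch; ++b) {
    const float* vector = vectors + b * m_cols;
    float* out = result + b * m_rows;
    for (int r = 0; r < m_rows; ++r) {
      const float* row = matrix + r * m_cols;
      float acc = 0.0f;
      for (int c = 0; c < m_cols; ++c) {
        acc += row[c] * vector[c];
      }
      out[r] += acc;
    }
  }
}
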
svdf.h
77 const int n_batch = input_shape.Dims(0); in EvalIntegerSVDF() local
86 std::copy(state_data + 1, state_data + n_batch * n_memory * n_filter, in EvalIntegerSVDF()
95 for (int b = 0; b < n_batch; b++) { in EvalIntegerSVDF()
120 for (int b = 0; b < n_batch; ++b) { in EvalIntegerSVDF()
133 n_batch * n_unit, n_rank); in EvalIntegerSVDF()
136 tensor_utils::VectorBatchVectorAdd(bias_data, n_unit, n_batch, in EvalIntegerSVDF()
142 for (int i = 0; i < n_batch * n_unit; ++i) { in EvalIntegerSVDF()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
sse_tensor_utils.h
39 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate() argument
41 vector, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
47 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
50 vectors, scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
56 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate() argument
60 vectors, scaling_factors, n_batch, result, per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
67 const float* __restrict__ scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
71 vectors, scaling_factors, n_batch, scratch, result, context); in MatrixBatchVectorMultiplyAccumulate()
77 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4() argument
79 segments, indices, m_rows, m_cols, vector, n_batch, result); in SparseMatrixBatchVectorMultiplyAccumulate1x4()
[all …]
neon_tensor_utils.h
29 int n_batch, float* result) { in MatrixBatchVectorMultiplyAccumulate() argument
31 vector, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
38 int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
41 vectors, scaling_factors, n_batch, result); in MatrixBatchVectorMultiplyAccumulate()
48 int n_batch, int32_t* scratch, in MatrixBatchVectorMultiplyAccumulate() argument
52 vectors, scaling_factors, n_batch, scratch, result, context); in MatrixBatchVectorMultiplyAccumulate()
58 int n_batch, float* __restrict__ result, const float* per_channel_scale, in MatrixBatchVectorMultiplyAccumulate() argument
62 vectors, scaling_factors, n_batch, result, per_channel_scale, in MatrixBatchVectorMultiplyAccumulate()
69 const float* __restrict__ vector, int n_batch, float* __restrict__ result) { in SparseMatrixBatchVectorMultiplyAccumulate1x4() argument
71 segments, indices, m_rows, m_cols, vector, n_batch, result); in SparseMatrixBatchVectorMultiplyAccumulate1x4()
[all …]
neon_tensor_utils_impl.h
34 int n_batch, float* result);
41 int n_batch,
50 int n_batch, int32_t* scratch,
58 int n_batch, float* __restrict__ result, const float* per_channel_scale,
65 int n_batch, int n_input, int16_t* output);
67 void NeonApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
70 void NeonApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch,
73 void NeonCwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch,
77 int32_t multiplier, int shift, int n_batch, int n_input,
80 void NeonCwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch,
[all …]
neon_tensor_utils.cc
202 int n_batch, float* result) { in NeonMatrixBatchVectorMultiplyAccumulate() argument
209 for (int b = 0; b < n_batch; b++) { in NeonMatrixBatchVectorMultiplyAccumulate()
251 const int8_t* ShuffleVectors(const int8_t* vectors, const int n_batch, in ShuffleVectors() argument
254 kNeonVectorAlignment, n_batch * m_cols, shuffled_vectors_free)); in ShuffleVectors()
256 for (int i = 0; i < n_batch; i += 4) { in ShuffleVectors()
306 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
311 ShuffleVectors(vectors, n_batch, m_cols, &shuffled_vectors_free); in DotprodMatrixBatchFourVectorMultiplyAccumulate()
314 for (int batch = 0; batch < n_batch; batch += 4) { in DotprodMatrixBatchFourVectorMultiplyAccumulate()
434 const int8_t* vectors, const float* scaling_factors, int n_batch, in DotprodMatrixBatchFourVectorMultiplyAccumulate() argument
439 ShuffleVectors(vectors, n_batch, m_cols, &shuffled_vectors_free); in DotprodMatrixBatchFourVectorMultiplyAccumulate()
[all …]
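
The NEON implementation walks the batch four vectors at a time: ShuffleVectors appears to repack four quantized batch vectors so the dot-product path can consume them together, and the main loops advance with batch += 4. A generic, non-SIMD sketch of that 4-way batch blocking (the repacked memory layout and intrinsics are NEON-specific details left out here; names are illustrative):

#include <functional>

// Sketch: generic 4-way batch blocking with a scalar tail, mirroring the
// batch += 4 main loops in the NEON kernel. process_one stands in for the
// per-batch pass over the matrix (interleaving and intrinsics omitted).
void ForEachBatchBlockedBy4(int n_batch,
                            const std::function<void(int batch)>& process_one) {
  int b = 0;
  for (; b + 4 <= n_batch; b += 4) {
    // In the SIMD kernel these four batches share one pass over the matrix.
    process_one(b);
    process_one(b + 1);
    process_one(b + 2);
    process_one(b + 3);
  }
  for (; b < n_batch; ++b) {
    process_one(b);  // scalar tail for n_batch not divisible by 4
  }
}
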
sse_tensor_utils.cc
99 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulateImpl() argument
102 for (std::intptr_t batch = 0; batch < n_batch; ++batch) { in SseMatrixBatchVectorMultiplyAccumulateImpl()
177 const int8_t* input_to_gate_weights, int32_t n_batch, in SseCpuBackendGemm() argument
193 rhs_params.cols = n_batch; in SseCpuBackendGemm()
198 dst_params.cols = n_batch; in SseCpuBackendGemm()
211 const float* __restrict__ scaling_factors, int n_batch, in SseMatrixBatchVectorMultiplyAccumulate() argument
214 matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, in SseMatrixBatchVectorMultiplyAccumulate()
222 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch, in SseMatrixBatchVectorMultiplyAccumulate() argument
226 SseCpuBackendGemm(vectors, bias, matrix, n_batch, m_cols, m_rows, in SseMatrixBatchVectorMultiplyAccumulate()
232 const int total_size = n_batch * m_rows; in SseMatrixBatchVectorMultiplyAccumulate()
[all …]
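
The SSE hits are the hybrid path (int8 weights and int8-quantized activations with float accumulation): the caller supplies one scaling factor per batch entry, and each integer dot product is scaled back to float before being accumulated into result. A scalar sketch of that contract, ignoring SIMD and the SseCpuBackendGemm fast path shown above (name is illustrative):

#include <cstdint>

// Sketch of the hybrid batched matvec:
//   result[b][r] += scaling_factors[b] * dot(matrix_row_r, vectors[b])
// matrix:  int8, m_rows x m_cols, row-major
// vectors: int8, n_batch x m_cols, batch-major
// result:  float, n_batch x m_rows, accumulated in place
void HybridMatVecAccumulateSketch(const int8_t* matrix, int m_rows, int m_cols,
                                  const int8_t* vectors,
                                  const float* scaling_factors, int n_batch,
                                  float* result) {
  for (int b = 0; b < n_batch; ++b) {
    const int8_t* vector = vectors + b * m_cols;
    float* out = result + b * m_rows;
    for (int r = 0; r < m_rows; ++r) {
      const int8_t* row = matrix + r * m_cols;
      int32_t acc = 0;
      for (int c = 0; c < m_cols; ++c) {
        acc += static_cast<int32_t>(row[c]) * static_cast<int32_t>(vector[c]);
      }
      out[r] += scaling_factors[b] * static_cast<float>(acc);
    }
  }
}
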
sse_tensor_utils_impl.h
35 const float* __restrict__ scaling_factors, int n_batch,
43 const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch,
50 const float* __restrict__ scaling_factors, int n_batch,
60 const float* __restrict__ scaling_factors, int n_batch,
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils_common.h
63 inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch, in BatchQuantizeFloats() argument
67 for (int b = 0; b < n_batch; ++b) { in BatchQuantizeFloats()
90 int n_batch, float* result);
99 const float* __restrict__ vector, int n_batch, float* __restrict__ result);
113 int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
126 const float* __restrict__ scaling_factors, int n_batch,
135 const float* __restrict__ scaling_factors, int n_batch,
152 const float* __restrict__ scaling_factors, int n_batch,
163 int32_t n_batch, int32_t n_input, int32_t n_cell,
172 const int32_t* gate_bias, int32_t n_batch,
[all …]
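
BatchQuantizeFloats is what feeds that hybrid path: each of the n_batch float rows is quantized to int8 on its own, producing one scaling factor per batch row (and, in the asymmetric variant, one zero point per row). A per-row sketch assuming symmetric quantization to a +/-127 range (name is illustrative):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Sketch: symmetric per-batch quantization of [n_batch, n_data] floats to
// int8, emitting one scaling factor per batch row for the hybrid kernels.
void BatchQuantizeFloatsSketch(const float* float_data, int n_batch, int n_data,
                               int8_t* quantized, float* scaling_factors) {
  for (int b = 0; b < n_batch; ++b) {
    const float* row = float_data + b * n_data;
    int8_t* out = quantized + b * n_data;
    float max_abs = 0.0f;
    for (int i = 0; i < n_data; ++i) {
      max_abs = std::max(max_abs, std::fabs(row[i]));
    }
    const float scale = (max_abs == 0.0f) ? 1.0f : max_abs / 127.0f;
    scaling_factors[b] = scale;
    for (int i = 0; i < n_data; ++i) {
      const int32_t q = static_cast<int32_t>(std::round(row[i] / scale));
      out[i] =
          static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-127, q)));
    }
  }
}
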
tensor_utils.h
45 const float* __restrict__ scaling_factors, int n_batch,
53 int n_batch, float* __restrict__ result, const float* per_channel_scale,
63 const float* vector_scaling_factors, int n_batch, in MatrixBatchVectorMultiplyAccumulate() argument
68 for (int b = 0; b < n_batch; ++b) { in MatrixBatchVectorMultiplyAccumulate()
73 scaling_factor_scratch, n_batch, result, in MatrixBatchVectorMultiplyAccumulate()
107 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
138 int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
/external/tensorflow/tensorflow/lite/kernels/
lstm_eval.cc
156 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat() argument
166 std::fill_n(gate, n_cell * n_batch, 0.0f); in CalculateLstmGateFloat()
168 tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate); in CalculateLstmGateFloat()
174 input_to_gate_weights, n_cell, n_input, input, n_batch, gate); in CalculateLstmGateFloat()
181 aux_input, n_batch, gate); in CalculateLstmGateFloat()
185 recurrent_to_gate_weights, n_cell, n_output, output_state, n_batch, gate); in CalculateLstmGateFloat()
189 cell_to_gate_weights, n_cell, cell_state, n_batch, gate); in CalculateLstmGateFloat()
193 tensor_utils::MeanStddevNormalization(gate, gate, n_cell, n_batch); in CalculateLstmGateFloat()
195 gate, n_batch, gate); in CalculateLstmGateFloat()
196 tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, gate); in CalculateLstmGateFloat()
[all …]
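
CalculateLstmGateFloat computes one LSTM gate for the whole batch: start from the gate bias (or zeros when layer norm is used), accumulate the input, auxiliary-input, recurrent and optional peephole contributions with the batched matvec helpers, optionally apply MeanStddevNormalization, then the gate activation. A condensed float sketch of that flow for a single gate, leaving out CIFG, peephole and layer norm (names are illustrative):

#include <algorithm>
#include <cmath>

// Sketch: gate = sigmoid(W_x * x + W_h * h_prev + b), computed for all
// n_batch entries at once into a [n_batch, n_cell] gate buffer.
void CalculateLstmGateSketch(const float* input, const float* input_weights,
                             const float* output_state,
                             const float* recurrent_weights,
                             const float* gate_bias, int n_batch, int n_input,
                             int n_output, int n_cell, float* gate) {
  // Broadcast the n_cell bias across the batch.
  for (int b = 0; b < n_batch; ++b) {
    std::copy(gate_bias, gate_bias + n_cell, gate + b * n_cell);
  }
  // Accumulate weight * activation contributions (batched matvec).
  auto matvec_acc = [&](const float* w, int rows, int cols, const float* v) {
    for (int b = 0; b < n_batch; ++b) {
      for (int r = 0; r < rows; ++r) {
        float acc = 0.0f;
        for (int c = 0; c < cols; ++c) {
          acc += w[r * cols + c] * v[b * cols + c];
        }
        gate[b * rows + r] += acc;
      }
    }
  };
  matvec_acc(input_weights, n_cell, n_input, input);
  matvec_acc(recurrent_weights, n_cell, n_output, output_state);
  // Gate activation: sigmoid for the input/forget/output gates; the real
  // kernel uses tanh for the cell (modulation) gate.
  for (int i = 0; i < n_batch * n_cell; ++i) {
    gate[i] = 1.0f / (1.0f + std::exp(-gate[i]));
  }
}
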
bidirectional_sequence_lstm_test.cc
32 BidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in BidirectionalLSTMOpModel() argument
40 : n_batch_(n_batch), in BidirectionalLSTMOpModel()
430 const int n_batch = 1; in TEST_P() local
441 n_batch, n_input, n_cell, n_output, sequence_length, /*use_cifg=*/false, in TEST_P()
447 {sequence_length, n_batch, n_input}, // input tensor in TEST_P()
495 {n_batch, n_output}, // activation_state tensor in TEST_P()
496 {n_batch, n_cell}, // cell_state tensor in TEST_P()
498 {n_batch, n_output}, // activation_state tensor in TEST_P()
499 {n_batch, n_cell}, // cell_state tensor in TEST_P()
501 {sequence_length, n_batch, 0}, // aux_input tensor in TEST_P()
[all …]
unidirectional_sequence_lstm_test.cc
32 UnidirectionalLSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, in UnidirectionalLSTMOpModel() argument
41 : n_batch_(n_batch), in UnidirectionalLSTMOpModel()
288 int n_batch, int n_input, int n_cell, int n_output, int sequence_length, in HybridUnidirectionalLSTMOpModel() argument
294 n_batch, n_input, n_cell, n_output, sequence_length, time_major, in HybridUnidirectionalLSTMOpModel()
500 const int n_batch = 1; in TEST_F() local
508 n_batch, n_input, n_cell, n_output, sequence_length, in TEST_F()
514 {sequence_length, n_batch, n_input}, // input tensor in TEST_F()
538 {n_batch, n_output}, // output_state tensor in TEST_F()
539 {n_batch, n_cell}, // cell_state tensor in TEST_F()
562 const int n_batch = 1; in TEST_F() local
[all …]
lstm_test.cc
39 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
46 n_batch_(n_batch), in LSTMOpModel()
48 input_ = AddInput({TensorType_FLOAT32, {n_batch, n_input}}); in LSTMOpModel()
104 AddVariableInput({TensorType_FLOAT32, {n_batch, n_output}}); in LSTMOpModel()
105 AddVariableInput({TensorType_FLOAT32, {n_batch, n_cell}}); in LSTMOpModel()
130 output_ = AddOutput({TensorType_FLOAT32, {n_batch, n_output}}); in LSTMOpModel()
413 const int n_batch = 1; in TEST_P() local
475 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_P()
490 const int n_batch = 1; in TEST_P() local
546 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST_P()
[all …]
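
Across the LSTM kernel tests, n_batch fixes the leading dimension of every activation tensor the op model wires up: the input is {n_batch, n_input}, the two variable state inputs are {n_batch, n_output} and {n_batch, n_cell}, and the output is {n_batch, n_output} (the sequence variants above add a leading sequence_length for time-major inputs). As a compact illustration with made-up sizes:

#include <array>

// Illustrative LSTM op-model shapes keyed on n_batch (sizes are examples).
constexpr int n_batch = 1, n_input = 2, n_cell = 4, n_output = 4;

constexpr std::array<int, 2> kInputShape = {n_batch, n_input};         // input
constexpr std::array<int, 2> kOutputStateShape = {n_batch, n_output};  // variable input
constexpr std::array<int, 2> kCellStateShape = {n_batch, n_cell};      // variable input
constexpr std::array<int, 2> kOutputShape = {n_batch, n_output};       // output
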
optional_tensor_test.cc
31 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
35 : n_batch_(n_batch), in LSTMOpModel()
229 const int n_batch = 1; in TEST() local
235 LSTMOpModel lstm(n_batch, n_input, n_cell, n_output, in TEST()
241 {n_batch, n_input}, // input tensor in TEST()
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/
lstm.cc
46 const int n_batch, const int n_input, const int n_aux_input, in CalculateLstmGateFloat() argument
58 std::fill_n(gate, n_cell * n_batch, 0.0f); in CalculateLstmGateFloat()
60 tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate); in CalculateLstmGateFloat()
66 input_to_gate_weights, n_cell, n_input, input, n_batch, gate); in CalculateLstmGateFloat()
73 aux_input, n_batch, gate); in CalculateLstmGateFloat()
77 recurrent_to_gate_weights, n_cell, n_output, output_state, n_batch, gate); in CalculateLstmGateFloat()
81 cell_to_gate_weights, n_cell, cell_state, n_batch, gate); in CalculateLstmGateFloat()
85 logger->LogTensorValue(intermediate_tensor_index, gate, n_cell * n_batch, in CalculateLstmGateFloat()
88 tensor_utils::MeanStddevNormalization(gate, gate, n_cell, n_batch); in CalculateLstmGateFloat()
90 gate, n_batch, gate); in CalculateLstmGateFloat()
[all …]
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
lstm_full_test.cc
35 LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg, in LSTMOpModel() argument
63 n_batch_(n_batch), in LSTMOpModel()
65 input_ = AddInput({TensorType_FLOAT32, {n_batch, n_input}}); in LSTMOpModel()
124 AddVariableInput({TensorType_FLOAT32, {n_batch, n_output}}); in LSTMOpModel()
125 AddVariableInput({TensorType_FLOAT32, {n_batch, n_cell}}); in LSTMOpModel()
150 output_ = AddOutput({TensorType_FLOAT32, {n_batch, n_output}}); in LSTMOpModel()
265 const int n_batch = 1; in TEST_P() local
319 n_batch, n_input, n_cell, n_output, in TEST_P()
339 const int n_batch = 1; in TEST_P() local
387 n_batch, n_input, n_cell, n_output, in TEST_P()
[all …]
/external/tensorflow/tensorflow/lite/experimental/kernels/
unidirectional_sequence_gru_test.cc
34 explicit GRUOpModel(int n_batch, int n_input, int n_output, in GRUOpModel() argument
37 : n_batch_(n_batch), n_input_(n_input), n_output_(n_output) { in GRUOpModel()
40 AddVariableInput(TensorData{TensorType_FLOAT32, {n_batch, n_output}}); in GRUOpModel()
101 const int n_batch = 2; in TEST() local
105 GRUOpModel m(n_batch, n_input, n_output, in TEST()
106 {{n_time, n_batch, n_input}, in TEST()
107 {n_batch, n_output}, in TEST()
133 EXPECT_THAT(m.GetOutputShape(), ElementsAre(n_time, n_batch, n_output)); in TEST()
gru_cell.cc
45 const int n_batch = input_shape.Dims(0); in GruCell() local
70 auto r = ru.block(0 * n_output, 0, n_output, n_batch); in GruCell()
71 auto u = ru.block(1 * n_output, 0, n_output, n_batch); in GruCell()
76 auto hr = xh.block(n_input, 0, n_output, n_batch); in GruCell()
88 memcpy(output_state, output, n_batch * n_output * sizeof(float)); in GruCell()
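
The GRU cell keeps the batch in the column dimension: the fused gate matrix ru has shape [2 * n_output, n_batch], with the reset gate r in rows [0, n_output) and the update gate u in rows [n_output, 2 * n_output), and xh appears to stack the n_input input rows on top of an n_output hidden-state block. A small Eigen sketch of slicing those gate blocks, assuming that layout (names are illustrative):

#include <Eigen/Core>

// Sketch: batch-as-columns layout. ru stacks both gates row-wise; block()
// slices one gate for all n_batch columns at once, as in the hits above.
void SliceGruGatesSketch(const Eigen::MatrixXf& ru, int n_output, int n_batch) {
  // Reset gate: rows [0, n_output), all batch columns.
  Eigen::MatrixXf r = ru.block(0 * n_output, 0, n_output, n_batch);
  // Update gate: rows [n_output, 2 * n_output), all batch columns.
  Eigen::MatrixXf u = ru.block(1 * n_output, 0, n_output, n_batch);
  (void)r;
  (void)u;
}
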
unidirectional_sequence_gru.cc
38 const int n_batch = input->dims->data[1]; in GruImpl() local
41 const int n_batch_input = n_batch * n_input; in GruImpl()
42 const int n_batch_output = n_batch * n_output; in GruImpl()
43 const RuntimeShape input_shape({n_batch, n_input}); in GruImpl()
56 const RuntimeShape output_shape = RuntimeShape({n_batch, n_output}); in GruImpl()
134 const int n_batch = input->dims->data[1]; in Prepare() local
142 TF_LITE_ENSURE_EQ(context, input_state->dims->data[0], n_batch); in Prepare()
181 output_size->data[1] = n_batch; in Prepare()
205 activation_size->data[0] = n_batch; in Prepare()
217 concat_size->data[0] = n_batch; in Prepare()
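
In the sequence GRU, n_batch is the second input dimension because the input is time-major, [n_time, n_batch, n_input]: each step advances the input pointer by n_batch * n_input and the output pointer by n_batch * n_output and runs the cell on one [n_batch, n_input] slice against the [n_batch, n_output] state. A pointer-stepping sketch under those assumptions, with a stubbed-out cell standing in for the real GRU math (names are illustrative):

#include <algorithm>

// Hypothetical per-step cell: consumes one [n_batch, n_input] slice and the
// [n_batch, n_output] state, writes one [n_batch, n_output] output slice.
// The GRU math itself is omitted from this sketch.
void GruCellStepSketch(const float* /*input*/, float* /*state*/, float* output,
                       int n_batch, int /*n_input*/, int n_output) {
  std::fill(output, output + n_batch * n_output, 0.0f);
}

// Sketch: walk a time-major [n_time, n_batch, n_input] input one step at a
// time, mirroring how the pointers advance per time step.
void RunSequenceGruSketch(const float* input, float* state, float* output,
                          int n_time, int n_batch, int n_input, int n_output) {
  const int n_batch_input = n_batch * n_input;    // elements per input step
  const int n_batch_output = n_batch * n_output;  // elements per output step
  for (int t = 0; t < n_time; ++t) {
    GruCellStepSketch(input + t * n_batch_input, state,
                      output + t * n_batch_output, n_batch, n_input, n_output);
  }
}
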
/external/tensorflow/tensorflow/lite/micro/kernels/
svdf_common.cc
42 const int n_batch = input_tensor->dims->data[0]; in EvalIntegerSvdfReference() local
64 const int16_t* old_state_end = state_ptr + n_batch * n_filter * n_memory; in EvalIntegerSvdfReference()
82 for (int b = 0; b < n_batch; b++) { in EvalIntegerSvdfReference()
108 for (int b = 0; b < n_batch; ++b) { in EvalIntegerSvdfReference()
135 for (int i = 0; i < n_batch; ++i) { in EvalIntegerSvdfReference()
144 for (int i = 0; i < n_batch * n_unit; ++i) { in EvalIntegerSvdfReference()
150 for (int b = 0; b < n_batch; ++b) { in EvalIntegerSvdfReference()
165 for (int i = 0; i < n_batch * n_unit; ++i) { in EvalIntegerSvdfReference()
/external/tensorflow/tensorflow/lite/micro/kernels/xtensa/
svdf.cc
63 const int n_batch = input_tensor->dims->data[0]; in EvalIntegerSvdfHifimini() local
87 const int16_t* old_state_end = state_ptr + n_batch * n_filter * n_memory; in EvalIntegerSvdfHifimini()
106 for (int b = 0; b < n_batch; b++) { in EvalIntegerSvdfHifimini()
162 for (int b = 0; b < n_batch; ++b) { in EvalIntegerSvdfHifimini()
201 for (int i = 0; i < n_batch; ++i) { in EvalIntegerSvdfHifimini()
210 for (int i = 0; i < n_batch * n_unit; ++i) { in EvalIntegerSvdfHifimini()
216 for (int b = 0; b < n_batch; ++b) { in EvalIntegerSvdfHifimini()
232 for (int i = 0; i < n_batch * n_unit; ++i) { in EvalIntegerSvdfHifimini()
258 const int n_batch = input_tensor->dims->data[0]; in EvalIntegerSvdfHifi4() local
272 int num_bytes = sizeof(*state_ptr) * (n_batch * n_filter * n_memory - 1); in EvalIntegerSvdfHifi4()
[all …]
