
Searched refs: num_units (Results 1 – 25 of 66), sorted by relevance


/external/tensorflow/tensorflow/lite/kernels/internal/
kernel_utils.cc
26 int input_size, int num_units, int batch_size, in RnnBatchStep() argument
33 bias_ptr, input_size, /*aux_input_size=*/0, num_units, in RnnBatchStep()
42 int input_size, int aux_input_size, int num_units, in RnnBatchStep() argument
48 if (output_batch_leading_dim == num_units) { in RnnBatchStep()
50 tensor_utils::VectorBatchVectorAssign(bias_ptr, num_units, batch_size, in RnnBatchStep()
55 input_weights_ptr, num_units, input_size, input_ptr_batch, batch_size, in RnnBatchStep()
61 aux_input_weights_ptr, num_units, aux_input_size, aux_input_ptr_batch, in RnnBatchStep()
67 recurrent_weights_ptr, num_units, num_units, hidden_state_ptr_batch, in RnnBatchStep()
72 output_ptr_batch, num_units * batch_size, activation, output_ptr_batch); in RnnBatchStep()
73 std::copy_n(output_ptr_batch, num_units * batch_size, in RnnBatchStep()
[all …]
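For orientation, the RnnBatchStep hits above compute one step of a basic RNN over a batch: the bias is broadcast into the output, the input, auxiliary-input, and recurrent weight matrices are multiply-accumulated into it, the activation is applied over num_units * batch_size values, and the result is copied back into the hidden state. A minimal NumPy sketch of that math, where the array names (x, aux, h, w_in, w_aux, w_rec, b) are illustrative and not taken from the indexed sources:

    import numpy as np

    def rnn_batch_step(x, aux, h, w_in, w_aux, w_rec, b, act=np.tanh):
        # x: [batch, input_size], aux: [batch, aux_input_size], h: [batch, num_units]
        # w_in: [num_units, input_size], w_aux: [num_units, aux_input_size],
        # w_rec: [num_units, num_units], b: [num_units]
        out = b + x @ w_in.T + aux @ w_aux.T + h @ w_rec.T  # bias assign plus three multiply-accumulates
        out = act(out)                                      # activation over batch * num_units values
        return out, out.copy()                              # output and updated hidden state (the copy_n above)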
kernel_utils.h
41 int input_size, int num_units, int batch_size,
51 int input_size, int aux_input_size, int num_units,
70 int num_units, int batch_size, int output_batch_leading_dim,
80 const float* bias_ptr, int input_size, int aux_input_size, int num_units,
/external/tensorflow/tensorflow/lite/micro/kernels/
svdf_test.cc
124 void ValidateSVDFGoldens(const int batch_size, const int num_units, in ValidateSVDFGoldens() argument
192 int golden_idx = i * batch_size * num_units; in ValidateSVDFGoldens()
193 for (int j = golden_idx; j < golden_idx + batch_size * num_units; ++j) { in ValidateSVDFGoldens()
205 void ValidateIntegerSVDFGoldens(const int batch_size, const int num_units, in ValidateIntegerSVDFGoldens() argument
261 int golden_idx = i * batch_size * num_units; in ValidateIntegerSVDFGoldens()
262 for (int j = golden_idx; j < golden_idx + batch_size * num_units; ++j) { in ValidateIntegerSVDFGoldens()
273 void TestSVDF(const int batch_size, const int num_units, const int input_size, in TestSVDF() argument
280 const int num_filters = num_units * rank; in TestSVDF()
301 const int output_dims_args[] = {2, batch_size, num_units}; in TestSVDF()
316 ValidateSVDFGoldens(batch_size, num_units, input_size, rank, tensors, in TestSVDF()
[all …]
svdf.cc
59 int batch_size, int memory_size, int num_filters, int num_units, int rank, in ApplyTimeWeightsBiasAndActivation() argument
89 float* output_ptr = output_data + i * num_units; in ApplyTimeWeightsBiasAndActivation()
91 for (int j = 0; j < num_units; ++j) { in ApplyTimeWeightsBiasAndActivation()
97 for (int i = 0; i < batch_size * num_units; ++i) { in ApplyTimeWeightsBiasAndActivation()
104 float* output_ptr_batch = GetTensorData<float>(output) + b * num_units; in ApplyTimeWeightsBiasAndActivation()
108 for (int i = 0; i < num_units; ++i) { in ApplyTimeWeightsBiasAndActivation()
117 float* output_ptr_batch = GetTensorData<float>(output) + b * num_units; in ApplyTimeWeightsBiasAndActivation()
118 for (int i = 0; i < num_units; ++i) { in ApplyTimeWeightsBiasAndActivation()
155 const int num_units = num_filters / rank; in EvalFloatSVDF() local
195 batch_size, memory_size, num_filters, num_units, rank, weights_time, bias, in EvalFloatSVDF()
[all …]
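The SVDF hits above share one layout convention: EvalFloatSVDF recovers num_units as num_filters / rank (TestSVDF builds num_filters as num_units * rank), and ApplyTimeWeightsBiasAndActivation writes each batch's activations at offset b * num_units in the output tensor. A small sketch of that indexing; the helper name is hypothetical and not from svdf.cc:

    def svdf_output_rows(output_flat, batch_size, num_filters, rank):
        # num_units is recovered from the filter count and rank, as in EvalFloatSVDF
        num_units = num_filters // rank
        # batch b owns the slice [b * num_units, (b + 1) * num_units) of the flat output
        return [output_flat[b * num_units:(b + 1) * num_units] for b in range(batch_size)]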
/external/tensorflow/tensorflow/core/ops/
cudnn_rnn_ops_test.cc
44 int num_units = 4; in TEST() local
47 std::vector<int> input_shape = {seq_length, batch_size, num_units}; in TEST()
49 num_units}; in TEST()
51 num_units * dir_count}; in TEST()
76 int num_units = 4; in TEST() local
79 std::vector<int> input_shape = {seq_length, batch_size, num_units}; in TEST()
81 num_units}; in TEST()
83 num_units * dir_count}; in TEST()
108 int num_units = 4; in TEST() local
111 std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; in TEST()
[all …]
cudnn_rnn_ops.cc
88 auto num_units = c->Dim(input_h_shape, 2); in __anon4fa58c430302() local
95 TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); in __anon4fa58c430302()
131 auto num_units = c->Dim(input_h_shape, 2); in __anon4fa58c430402() local
138 TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); in __anon4fa58c430402()
179 auto num_units = c->Dim(input_h_shape, 2); in __anon4fa58c430502() local
186 TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); in __anon4fa58c430502()
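In all three shape functions above, num_units is read from dimension 2 of input_h and the per-timestep output width is num_units * dir_count. A sketch of that arithmetic under this reading (not the ops' registered shape functions; dir_count is passed in here for illustration):

    def cudnn_rnn_output_shape(input_shape, input_h_shape, dir_count):
        # input_shape:   [seq_length, batch_size, input_size]
        # input_h_shape: [num_layers * dir_count, batch_size, num_units]
        seq_length, batch_size, _ = input_shape
        num_units = input_h_shape[2]           # c->Dim(input_h_shape, 2)
        output_size = num_units * dir_count    # c->Multiply(num_units, dir_count, &output_size)
        return [seq_length, batch_size, output_size]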
/external/tensorflow/tensorflow/lite/kernels/
unidirectional_sequence_rnn.cc
70 const int num_units = input_weights->dims->data[0]; in Prepare() local
82 TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[1], num_units); in Prepare()
90 output_size_array->data[2] = num_units; in Prepare()
153 const int num_units = input_weights->dims->data[0]; in EvalFloat() local
169 GetTensorData<float>(output) + s * num_units * batch_size; in EvalFloat()
173 input_size, num_units, batch_size, num_units, params->activation, in EvalFloat()
181 GetTensorData<float>(hidden_state) + b * num_units; in EvalFloat()
188 b * num_units * max_time + s * num_units; in EvalFloat()
192 input_size, num_units, /*batch_size=*/1, num_units, in EvalFloat()
211 const int num_units = input_weights->dims->data[0]; in EvalHybrid() local
[all …]
unidirectional_sequence_rnn_test.cc
229 int num_units() { return units_; } in num_units() function in tflite::__anon46a7a5720111::UnidirectionalRNNOpModel
292 float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len(); in TEST()
317 float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len(); in TEST()
343 float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len(); in TEST()
372 float* golden_batch_start = rnn_golden_output + i * rnn.num_units(); in TEST()
373 float* golden_batch_end = golden_batch_start + rnn.num_units(); in TEST()
401 float* golden_batch_start = rnn_golden_output + i * rnn.num_units(); in TEST()
402 float* golden_batch_end = golden_batch_start + rnn.num_units(); in TEST()
431 float* golden_batch_start = rnn_golden_output + i * rnn.num_units(); in TEST()
432 float* golden_batch_end = golden_batch_start + rnn.num_units(); in TEST()
basic_rnn_test.cc
217 int num_units() { return units_; } in num_units() function in tflite::__anon76108c220111::RNNOpModel
275 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
276 float* golden_end = golden_start + rnn.num_units(); in TEST()
302 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
303 float* golden_end = golden_start + rnn.num_units(); in TEST()
330 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
331 float* golden_end = golden_start + rnn.num_units(); in TEST()
fully_connected.cc
162 const int num_units = filter->dims->data[0]; in PrepareImpl() local
219 int accum_scratch_dims[2] = {num_units, batch_size}; in PrepareImpl()
223 accum_size->data[0] = num_units; in PrepareImpl()
240 output_size_array->data[output_size_array->size - 1] = num_units; in PrepareImpl()
245 output_size_array->data[1] = num_units; in PrepareImpl()
287 const int num_units = filter->dims->data[0]; in EvalPie() local
291 tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias), num_units, in EvalPie()
295 std::fill_n(GetTensorData<float>(output), batch_size * num_units, 0.0f); in EvalPie()
300 GetTensorData<float>(filter), num_units, input_size, in EvalPie()
306 GetTensorData<float>(output), batch_size * num_units, params->activation, in EvalPie()
[all …]
basic_rnn.cc
64 const int num_units = input_weights->dims->data[0]; in Prepare() local
76 TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[1], num_units); in Prepare()
83 output_size_array->data[1] = num_units; in Prepare()
139 const int num_units = input_weights->dims->data[0]; in EvalFloat() local
156 input_size, num_units, batch_size, output_batch_leading_dim, in EvalFloat()
170 const int num_units = input_weights->dims->data[0]; in EvalHybrid() local
197 num_units, batch_size, output_batch_leading_dim, params->activation, in EvalHybrid()
/external/mesa3d/src/gallium/drivers/lima/ir/gp/
disasm.c
37 num_units enumerator
40 static const gpir_codegen_store_src gp_unit_to_store_src[num_units] = {
170 printf("^%d", cur_dest_index - 1 * num_units + unit_acc_0); in print_src()
174 printf("^%d", cur_dest_index - 1 * num_units + unit_acc_1); in print_src()
178 printf("^%d", cur_dest_index - 1 * num_units + unit_mul_0); in print_src()
182 printf("^%d", cur_dest_index - 1 * num_units + unit_mul_1); in print_src()
186 printf("^%d", cur_dest_index - 1 * num_units + unit_pass); in print_src()
212 printf("^%d", cur_dest_index - 1 * num_units + unit_complex); in print_src()
216 printf("^%d", cur_dest_index - 2 * num_units + unit_pass); in print_src()
220 printf("^%d", cur_dest_index - 2 * num_units + unit_acc_0); in print_src()
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
svdf.h
35 int batch_size, int memory_size, int num_filters, int num_units, int rank, in ApplyTimeWeightsBiasAndActivation() argument
55 tensor_utils::VectorBatchVectorAssign(GetTensorData<float>(bias), num_units, in ApplyTimeWeightsBiasAndActivation()
59 std::fill_n(GetTensorData<float>(output), batch_size * num_units, 0.0f); in ApplyTimeWeightsBiasAndActivation()
64 float* output_ptr_batch = GetTensorData<float>(output) + b * num_units; in ApplyTimeWeightsBiasAndActivation()
67 num_units, rank); in ApplyTimeWeightsBiasAndActivation()
72 float* output_ptr_batch = GetTensorData<float>(output) + b * num_units; in ApplyTimeWeightsBiasAndActivation()
73 tensor_utils::ApplyActivationToVector(output_ptr_batch, num_units, in ApplyTimeWeightsBiasAndActivation()
212 const int num_units = num_filters / rank; in EvalFloatSVDF() local
237 num_units, rank, weights_time, bias, in EvalFloatSVDF()
251 const int num_units = num_filters / rank; in EvalHybridSVDF() local
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/
rnn_test.py
723 num_units=input_size,
745 num_units=input_size,
766 num_units = 512
772 np.random.randn(batch_size, num_units).astype(np.float32)
812 def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu): argument
820 np.random.randn(batch_size, num_units).astype(np.float32)
846 (batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
856 num_units=input_size,
874 def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units, argument
883 np.random.randn(batch_size, num_units).astype(np.float32)
[all …]
rnn_cell_test.py
383 num_units = 3
391 num_units, initializer=initializer, state_is_tuple=False)
398 self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
406 num_units = 3
414 num_units,
425 self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
433 self.assertAllEqual(value, np.zeros((batch_size, num_units)))
437 num_units = 3
444 state_saver = TestStateSaver(batch_size, 2 * num_units)
446 num_units,
[all …]
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_CudnnRNNV3.pbtxt
12 when input_size == num_units; 'auto_select' implies 'skip_input' when
13 input_size == num_units; otherwise, it implies 'linear_input'.
23 [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
24 is [batch_size, num_layer * dir, num_units].
26 [num_layer * dir, batch, num_units]. For other models, it is ignored.
33 [seq_length, batch_size, dir * num_units]. If time_major is false, the
34 shape is [batch_size, seq_length, dir * num_units].
api_def_CudnnRNNBackpropV3.pbtxt
12 when input_size == num_units; 'auto_select' implies 'skip_input' when
13 input_size == num_units; otherwise, it implies 'linear_input'.
23 [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
24 is [batch_size, num_layer * dir, num_units].
26 [num_layer * dir, batch, num_units]. For other models, it is ignored.
33 [seq_length, batch_size, dir * num_units]. If time_major is false, the
34 shape is [batch_size, seq_length, dir * num_units].
api_def_CudnnRNN.pbtxt
11 when input_size == num_units; 'auto_select' implies 'skip_input' when
12 input_size == num_units; otherwise, it implies 'linear_input'.
20 num_units].
22 [num_layer * dir, batch, num_units]. For other models, it is ignored.
28 dir * num_units].
api_def_CudnnRNNV2.pbtxt
12 when input_size == num_units; 'auto_select' implies 'skip_input' when
13 input_size == num_units; otherwise, it implies 'linear_input'.
21 num_units].
23 [num_layer * dir, batch, num_units]. For other models, it is ignored.
29 dir * num_units].
api_def_CudnnRNNBackprop.pbtxt
10 when input_size == num_units; 'auto_select' implies 'skip_input' when
11 input_size == num_units; otherwise, it implies 'linear_input'.
19 num_units].
21 [num_layer * dir, batch, num_units]. For other models, it is ignored.
27 dir * num_units].
api_def_CudnnRNNBackpropV2.pbtxt
13 when input_size == num_units; 'auto_select' implies 'skip_input' when
14 input_size == num_units; otherwise, it implies 'linear_input'.
22 num_units].
24 [num_layer * dir, batch, num_units]. For other models, it is ignored.
30 dir * num_units].
api_def_CudnnRNNParamsSize.pbtxt
9 num_units: Specifies the size of the hidden state.
14 when input_size == num_units; 'auto_select' implies 'skip_input' when
15 input_size == num_units; otherwise, it implies 'linear_input'.
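These api_def excerpts all state the same rule for input_mode: 'skip_input' applies when input_size == num_units, and 'auto_select' resolves to 'skip_input' in that case and to 'linear_input' otherwise (the same check appears in ToRNNInputMode in cudnn_rnn_ops.cc further down). A hedged sketch of that resolution; the function name and string literals are illustrative:

    def resolve_input_mode(input_mode, input_size, num_units):
        # mirrors the documented 'auto_select' rule
        if input_mode == "auto_select":
            return "skip_input" if input_size == num_units else "linear_input"
        return input_mode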
/external/tensorflow/tensorflow/lite/experimental/examples/lstm/
unidirectional_sequence_lstm_test.py
58 self.num_units = 16
63 self.num_units, use_peepholes=True, forget_bias=1.0, name="rnn1"),
65 self.num_units, num_proj=8, forget_bias=1.0, name="rnn2"),
67 self.num_units // 2,
73 self.num_units, forget_bias=1.0, name="rnn4")
92 tf.random.normal([self.num_units, self.n_classes]))
bidirectional_sequence_lstm_test.py
58 self.num_units = 16
63 self.num_units, use_peepholes=True, forget_bias=0, name="rnn1"),
65 self.num_units, num_proj=8, forget_bias=0, name="rnn2"),
67 self.num_units // 2,
73 self.num_units, forget_bias=0, name="rnn4")
95 tf.random.normal([self.num_units * 2, self.n_classes]))
/external/tensorflow/tensorflow/core/kernels/
cudnn_rnn_ops.cc
150 CudnnRnnParameters(int num_layers, int input_size, int num_units, in CudnnRnnParameters() argument
156 num_units_(num_units), in CudnnRnnParameters()
166 HashList({num_layers, input_size, num_units, max_seq_length, batch_size, in CudnnRnnParameters()
271 Status ToRNNInputMode(TFRNNInputMode tf_input_mode, int num_units, in ToRNNInputMode() argument
281 *input_mode = (input_size == num_units) ? RnnInputMode::kRnnSkipInput in ToRNNInputMode()
498 int num_units; member
510 num_units == rhs.num_units && dir_count == rhs.dir_count && in IsCompatibleWith()
517 num_layers, input_size, num_units, dir_count, max_seq_length, in DebugString()
532 HashList({shapes.num_layers, shapes.input_size, shapes.num_units, in operator ()()
601 model_shapes->num_units = (*input_h)->dim_size(2); in ExtractForwardInput()
[all …]
