/external/tensorflow/tensorflow/lite/kernels/

D | unidirectional_sequence_rnn_test.cc | matches in TEST():
      284  float* batch_start = rnn_input;
      285  float* batch_end = batch_start + input_sequence_size;
      286  rnn.SetInput(0, batch_start, batch_end);
      287  rnn.SetInput(input_sequence_size, batch_start, batch_end);
      309  float* batch_start = rnn_input;
      310  float* batch_end = batch_start + input_sequence_size;
      311  rnn.SetInput(0, batch_start, batch_end);
      312  rnn.SetInput(input_sequence_size, batch_start, batch_end);
      335  float* batch_start = rnn_input;
      336  float* batch_end = batch_start + input_sequence_size;
      [all …]
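The recurring pattern in these kernel tests is to compute a [batch_start, batch_end) pointer range into a flat float buffer and pass it to the fixture's SetInput(offset, begin, end) at different offsets. Below is a minimal standalone sketch of that pattern, assuming a hypothetical FakeModel whose SetInput simply copies the range; it is not the real TFLite test harness.

      // Sketch only: SetInput copies the half-open range [begin, end) into a
      // flat input buffer starting at element `offset`.
      #include <algorithm>
      #include <cstdio>
      #include <vector>

      class FakeModel {
       public:
        explicit FakeModel(int input_size) : input_(input_size, 0.0f) {}
        void SetInput(int offset, const float* begin, const float* end) {
          std::copy(begin, end, input_.begin() + offset);
        }
        const std::vector<float>& input() const { return input_; }

       private:
        std::vector<float> input_;
      };

      int main() {
        const int input_sequence_size = 4;
        float rnn_input[] = {0.1f, 0.2f, 0.3f, 0.4f};

        // Both batches receive the same sequence, so the same pointer range is
        // written at offset 0 and again at offset input_sequence_size.
        FakeModel rnn(2 * input_sequence_size);
        float* batch_start = rnn_input;
        float* batch_end = batch_start + input_sequence_size;
        rnn.SetInput(0, batch_start, batch_end);
        rnn.SetInput(input_sequence_size, batch_start, batch_end);

        for (float v : rnn.input()) std::printf("%.1f ", v);
        std::printf("\n");
        return 0;
      }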
D | bidirectional_sequence_rnn_test.cc | matches in TEST():
      799  float* batch_start = rnn_input;
      800  float* batch_end = batch_start + input_sequence_size;
      801  rnn.SetInput(0, batch_start, batch_end);
      802  rnn.SetInput(input_sequence_size, batch_start, batch_end);
      841  float* batch_start = rnn_input + i * rnn.input_size();
      842  float* batch_end = batch_start + rnn.input_size();
      844  rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
      845  rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
      875  float* batch_start = rnn_input;
      876  float* batch_end = batch_start + input_sequence_size;
      [all …]
D | basic_rnn_test.cc | matches in TEST():
      268  float* batch_start = rnn_input + i * rnn.input_size();
      269  float* batch_end = batch_start + rnn.input_size();
      270  rnn.SetInput(0, batch_start, batch_end);
      271  rnn.SetInput(rnn.input_size(), batch_start, batch_end);
      295  float* batch_start = rnn_input + i * rnn.input_size();
      296  float* batch_end = batch_start + rnn.input_size();
      297  rnn.SetInput(0, batch_start, batch_end);
      298  rnn.SetInput(rnn.input_size(), batch_start, batch_end);
      323  float* batch_start = rnn_input + i * rnn.input_size();
      324  float* batch_end = batch_start + rnn.input_size();
      [all …]
D | svdf_test.cc | matches in VerifyGoldens():
      245  float* batch_start =
      247  float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
      248  svdf->SetInput(0, batch_start, batch_end);
D | unidirectional_sequence_lstm_test.cc | matches in VerifyGoldens():
      355  const float* batch_start = input[b].data() + i * num_inputs;
      356  const float* batch_end = batch_start + num_inputs;
      358  lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
      364  const float* batch_start = input[b].data();
      365  const float* batch_end = batch_start + input_sequence_size * num_inputs;
      367  lstm->SetInput(b * input_sequence_size * num_inputs, batch_start,
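The LSTM test above exercises two input layouts: a per-step slice written at offset ((i * num_batches) + b) * num_inputs, and a whole-sequence copy written at offset b * input_sequence_size * num_inputs. The small check below only illustrates the per-step offset arithmetic; the sizes are made up for the example.

      // Sketch: check that the per-step offsets tile a flat buffer with one
      // num_inputs-sized slice per (step, batch) pair, with no gaps or overlap.
      #include <cassert>

      int main() {
        const int num_batches = 2;
        const int num_inputs = 3;
        const int sequence_size = 4;

        int expected = 0;
        for (int i = 0; i < sequence_size; ++i) {    // time step
          for (int b = 0; b < num_batches; ++b) {    // batch index
            const int offset = ((i * num_batches) + b) * num_inputs;
            assert(offset == expected);              // slices are back to back
            expected += num_inputs;
          }
        }
        assert(expected == sequence_size * num_batches * num_inputs);
        return 0;
      }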
D | fully_connected_test.cc | matches in TEST_P():
      862  float* batch_start = fully_connected_input + i * m.input_size();
      863  float* batch_end = batch_start + m.input_size();
      864  m.SetInput(0, batch_start, batch_end);
      865  m.SetInput(m.input_size(), batch_start, batch_end);
D | lstm_test.cc | matches in VerifyGoldens():
      379   const float* batch_start = input[b].data() + i * num_inputs;
      380   const float* batch_end = batch_start + num_inputs;
      382   lstm->SetInput(b * lstm->num_inputs(), batch_start, batch_end);
      1777  const float* batch_start = input[b].data() + i * num_inputs;
      1778  const float* batch_end = batch_start + num_inputs;
      1781  batch_start, batch_end);
/external/libtextclassifier/annotator/

D | annotator.cc | matches in ModelClickContextScoreChunks():
      2168  for (int batch_start = span_of_interest.first;
      2169  batch_start < span_of_interest.second; batch_start += max_batch_size) {
      2171  std::min(batch_start + max_batch_size, span_of_interest.second);
      2176  for (int click_pos = batch_start; click_pos < batch_end; ++click_pos) {
      2182  const int batch_size = batch_end - batch_start;
      2199  for (int click_pos = batch_start; click_pos < batch_end; ++click_pos) {
      2201  logits.data() + logits.dim(1) * (click_pos - batch_start),
    matches in ModelBoundsSensitiveScoreChunks():
      2279  for (int batch_start = 0; batch_start < candidate_spans.size();
      2280  batch_start += max_batch_size) {
      2281  const int batch_end = std::min(batch_start + max_batch_size,
      [all …]
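Both chunking loops above slice a candidate range into batches of at most max_batch_size, clamping batch_end with std::min so the final batch may be smaller. A standalone sketch of that loop shape, with a print standing in for feature extraction and model scoring:

      #include <algorithm>
      #include <cstdio>
      #include <utility>

      int main() {
        const std::pair<int, int> span_of_interest = {3, 17};
        const int max_batch_size = 5;

        for (int batch_start = span_of_interest.first;
             batch_start < span_of_interest.second;
             batch_start += max_batch_size) {
          const int batch_end =
              std::min(batch_start + max_batch_size, span_of_interest.second);
          const int batch_size = batch_end - batch_start;  // final chunk may be short
          // Placeholder for building features for [batch_start, batch_end)
          // and running the scoring model on that batch.
          std::printf("score chunk [%d, %d), size %d\n", batch_start, batch_end,
                      batch_size);
        }
        return 0;
      }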
/external/tensorflow/tensorflow/python/keras/engine/

D | training_arrays.py | matches:
      326  for batch_index, (batch_start, batch_end) in enumerate(batches):
      327  batch_ids = index_array[batch_start:batch_end]
      359  aggregator.aggregate(batch_outs, batch_start, batch_end)
D | training_utils.py | matches:
      76   def aggregate(self, batch_outs, batch_start=None, batch_end=None):
      100  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
      105  self.results[0] += batch_outs[0] * (batch_end - batch_start)
      130  def aggregate(self, batch_outs, batch_start=None, batch_end=None):
      136  self.results[i][batch_start:batch_end] = batch_out
D | training_generator.py | matches:
      487  for (batch_start, batch_end) in batches:
      488  batch_ids = index_array[batch_start:batch_end]
/external/tensorflow/tensorflow/lite/delegates/nnapi/

D | nnapi_delegate_test.cc | matches in TEST():
      1970  float* batch_start = rnn_input + i * rnn.input_size();
      1971  float* batch_end = batch_start + rnn.input_size();
      1972  rnn.SetInput(0, batch_start, batch_end);
      1973  rnn.SetInput(rnn.input_size(), batch_start, batch_end);
    matches in VerifyGoldens():
      2174  float* batch_start =
      2176  float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
      2177  svdf->SetInput(0, batch_start, batch_end);
      2497  const float* batch_start = input[b].data() + i * num_inputs;
      2498  const float* batch_end = batch_start + num_inputs;
      2500  lstm->SetInput(b * lstm->num_inputs(), batch_start, batch_end);
/external/tensorflow/tensorflow/core/kernels/

D | deep_conv2d.cc | matches in operator()():
      1009  &output](int64 batch_start, int64 batch_limit) {
      1120  for (int64 b = batch_start; b < batch_limit; ++b) {
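Here the batch dimension is handed to a worker callable as a half-open [batch_start, batch_limit) range, captured in the lambda on line 1009 and looped over on line 1120. The sketch below shows only that shape; the shard body and the sequential split stand in for the real transform and thread-pool dispatch.

      #include <cstdint>
      #include <cstdio>

      int main() {
        const int64_t num_batches = 10;

        // Per-shard work: process the batches assigned to this shard.
        auto shard = [](int64_t batch_start, int64_t batch_limit) {
          for (int64_t b = batch_start; b < batch_limit; ++b) {
            std::printf("transforming batch %lld\n", static_cast<long long>(b));
          }
        };

        // The real kernel splits the range across a thread pool; here the two
        // halves are just run back to back.
        shard(0, num_batches / 2);
        shard(num_batches / 2, num_batches);
        return 0;
      }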
/external/tensorflow/tensorflow/contrib/tpu/python/tpu/

D | keras_support.py | matches:
      1793  for batch_index, (batch_start, batch_end) in enumerate(batches):
      1794  batch_ids = index_array[batch_start:batch_end]
/external/tensorflow/tensorflow/contrib/metrics/python/ops/

D | metric_ops_test.py | matches:
      7232  batch_start, batch_end = idx, idx + batch_size
      7236  labels_t: labels[batch_start:batch_end],
      7237  predictions_t: predictions[batch_start:batch_end],
      7238  weights_t: weights[batch_start:batch_end]