/external/igt-gpu-tools/lib/ |
D | media_fill.c |
    140 uint32_t batch_end; in gen7_media_fillfunc() local
    184 uint32_t batch_end; in gen8_media_fillfunc() local
    229 uint32_t batch_end; in __gen9_media_fillfunc() local
    298 uint32_t batch_end; in __gen11_media_vme_func() local
|
D | media_spin.c |
    87 uint32_t batch_end; in gen8_media_spinfunc() local
    127 uint32_t batch_end; in gen9_media_spinfunc() local
|
D | gpgpu_fill.c |
    127 uint32_t batch_end; in gen7_gpgpu_fillfunc() local
    176 uint32_t batch_end; in gen8_gpgpu_fillfunc() local
    225 uint32_t batch_end; in __gen9_gpgpu_fillfunc() local
|
D | rendercopy_gen6.c |
    63 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush()
    532 uint32_t batch_end; in gen6_render_copyfunc() local
|
D | rendercopy_gen7.c |
    37 drm_intel_context *context, uint32_t batch_end) in gen7_render_flush()
    514 uint32_t batch_end; in gen7_render_copyfunc() local
|
D | rendercopy_gen4.c |
    125 drm_intel_context *context, uint32_t batch_end) in gen4_render_flush()
    659 uint32_t offset, batch_end; in gen4_render_copyfunc() local
|
D | gpu_cmds.c |
    28 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end) in gen7_render_flush()
    40 gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end) in gen7_render_context_flush()
|
D | rendercopy_gen8.c |
    133 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush()
    911 uint32_t batch_end; in gen8_render_copyfunc() local
|
D | rendercopy_gen9.c |
    164 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush()
    969 uint32_t batch_end; in _gen9_render_copyfunc() local
|
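In the IGT entries above, batch_end holds the byte offset just past the last emitted command in the batch buffer; the gen*_render_flush() helpers forward that length to the buffer-execution call so only the valid bytes are submitted. The sketch below is a minimal, self-contained illustration of that bookkeeping; struct batch and emit_dword() are hypothetical simplifications, not the actual intel_batchbuffer API.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical, stripped-down stand-in for igt's intel_batchbuffer: a byte
// buffer plus a write pointer (the real structure lives in lib/intel_batchbuffer.h).
struct batch {
  uint8_t buffer[4096];
  uint8_t *ptr;
};

// Append one command dword at the current write pointer (igt uses OUT_BATCH()).
static void emit_dword(struct batch *b, uint32_t dword) {
  std::memcpy(b->ptr, &dword, sizeof(dword));
  b->ptr += sizeof(dword);
}

int main() {
  const uint32_t MI_BATCH_BUFFER_END = 0xAu << 23;  // terminates a batch on Intel GPUs

  struct batch b;
  b.ptr = b.buffer;

  emit_dword(&b, 0);                    // placeholder command dword
  emit_dword(&b, MI_BATCH_BUFFER_END);  // last instruction in the batch

  // batch_end = byte offset just past the last emitted dword, aligned to 8 as
  // the rendercopy helpers do before handing it to their flush function, which
  // passes it on as the number of bytes for the kernel to execute.
  uint32_t batch_end = (uint32_t)(b.ptr - b.buffer);
  batch_end = (batch_end + 7) & ~7u;

  std::printf("batch_end = %u bytes\n", batch_end);  // 8 for the two dwords above
  return 0;
}
```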
/external/tensorflow/tensorflow/lite/kernels/ |
D | unidirectional_sequence_rnn_test.cc |
    288 float* batch_end = batch_start + input_sequence_size; in TEST() local
    317 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
    344 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
    370 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    400 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
    431 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
|
D | bidirectional_sequence_rnn_test.cc |
    879 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
    930 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
    974 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
    1018 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    1063 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    1132 float* batch_end = batch_start + input_sequence_size; in TEST() local
    1189 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    1241 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    1289 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    1333 float* batch_end = batch_start + input_sequence_size; in TEST() local
    [all …]
|
D | basic_rnn_test.cc |
    270 float* batch_end = batch_start + rnn.input_size(); in TEST() local
    299 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
    327 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
|
D | svdf_test.cc | 249 float* batch_end = batch_start + svdf_input_size * svdf_num_batches; in VerifyGoldens() local
|
D | unidirectional_sequence_lstm_test.cc |
    403 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
    412 const float* batch_end = batch_start + input_sequence_size * num_inputs; in VerifyGoldens() local
    2525 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
|
D | lstm_test.cc |
    360 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
    371 const float* batch_end = batch_start + num_outputs; in VerifyGoldens() local
    2401 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
|
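The TFLite kernel tests above all share one idiom: the flat golden input is walked batch by batch (or step by step), with batch_start pointing at the first float of the current slice and batch_end = batch_start + input_size marking the half-open end of that slice, which is then copied into the model's input tensor. A minimal sketch of that slicing, assuming a hypothetical SetInput() helper in place of the real SingleOpModel/PopulateTensor test API:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the test wrappers' input setter; the real tests
// route such [begin, end) ranges through SingleOpModel::PopulateTensor.
static std::vector<float> g_input_tensor;
static void SetInput(size_t offset, const float* begin, const float* end) {
  size_t needed = offset + static_cast<size_t>(end - begin);
  if (g_input_tensor.size() < needed) g_input_tensor.resize(needed);
  std::copy(begin, end, g_input_tensor.begin() + offset);
}

int main() {
  const int input_size = 3;   // features per batch/time step (illustrative)
  const int num_batches = 2;
  // Flat golden input: num_batches * input_size floats, batch-major.
  std::vector<float> rnn_input = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};

  for (int b = 0; b < num_batches; ++b) {
    // Same idiom as the listed tests: a half-open [batch_start, batch_end)
    // range selecting one batch out of the flat input array.
    float* batch_start = rnn_input.data() + b * input_size;
    float* batch_end = batch_start + input_size;
    SetInput(static_cast<size_t>(b) * input_size, batch_start, batch_end);
  }

  std::cout << "copied " << g_input_tensor.size() << " floats\n";
  return 0;
}
```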
/external/tensorflow/tensorflow/core/kernels/sparse/ |
D | sparse_cholesky_op.cc |
    112 [&](int64 batch_begin, int64 batch_end) { in Compute()
    191 [&](int64 batch_begin, int64 batch_end) { in Compute()
|
D | sparse_ordering_amd_op.cc | 99 amd_cost_per_batch, [&](int64 batch_begin, int64 batch_end) { in Compute()
|
D | csr_sparse_matrix_to_dense_op.cc | 95 auto shard = [&](int64 batch_begin, int64 batch_end) { in Compute()
|
D | csr_sparse_matrix_to_sparse_tensor_op.cc | 99 auto shard = [&](int64 batch_begin, int64 batch_end) { in Compute()
|
D | sparse_mat_mul_op.cc |
    186 matmul_cost_per_batch, [&](int64 batch_begin, int64 batch_end) { in Compute()
    228 [&](int64 batch_begin, int64 batch_end) { in Compute()
|
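In the sparse kernels above, batch_begin/batch_end delimit the contiguous range of batches handed to one worker: each Compute() passes a per-batch cost estimate and a lambda over [batch_begin, batch_end) to TensorFlow's work sharder, which splits the batch dimension across threads. The sketch below imitates that contract with a toy splitter; it is an assumption-laden stand-in, not the real Shard()/ThreadPool::ParallelFor implementation.

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <thread>
#include <vector>

// Toy parallel-for: split [0, total) into contiguous shards, one per thread.
// TensorFlow's work sharder additionally uses the per-batch cost to pick
// shard sizes; that refinement is omitted here.
void ParallelForSketch(int64_t total, int num_threads,
                       const std::function<void(int64_t, int64_t)>& fn) {
  std::vector<std::thread> workers;
  int64_t shard = (total + num_threads - 1) / num_threads;
  for (int64_t begin = 0; begin < total; begin += shard) {
    int64_t end = std::min(begin + shard, total);
    workers.emplace_back(fn, begin, end);
  }
  for (auto& t : workers) t.join();
}

int main() {
  const int64_t num_batches = 8;
  std::vector<double> per_batch_result(num_batches, 0.0);

  // Same shape as the kernels above: the lambda receives a half-open
  // [batch_begin, batch_end) range and processes each batch independently
  // (factorization, ordering, conversion, matmul, ... here: placeholder work).
  ParallelForSketch(num_batches, 4,
                    [&](int64_t batch_begin, int64_t batch_end) {
                      for (int64_t i = batch_begin; i < batch_end; ++i) {
                        per_batch_result[i] = static_cast<double>(i) * 2.0;
                      }
                    });

  std::cout << per_batch_result.back() << "\n";
  return 0;
}
```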
/external/tensorflow/tensorflow/python/keras/engine/ |
D | training_utils_v1.py |
    105 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
    143 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
    286 def aggregate(self, batch_element, batch_start=None, batch_end=None): argument
    377 def aggregate(self, batch_element, batch_start, batch_end): argument
    405 def _slice_assign(self, batch_element, batch_start, batch_end, is_finished): argument
    461 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
|
D | training_utils_v1_test.py | 265 …def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument argument
|
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/ |
D | lstm_full_test.cc |
    243 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
    254 const float* batch_end = batch_start + num_outputs; in VerifyGoldens() local
|
/external/tensorflow/tensorflow/core/kernels/linalg/ |
D | matrix_band_part_op.cc | 156 const int64 batch_end = (end + m - 1) / m; in operator ()() local
|
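The matrix_band_part entry maps a shard boundary expressed in flattened rows back to a batch index: with m rows per matrix, (end + m - 1) / m is the ceiling of end / m, i.e. the exclusive index of the last batch the shard touches. A tiny worked example, with m and end chosen purely for illustration:

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // Shard boundaries are given in flattened rows; m rows belong to each matrix.
  const int64_t m = 4;     // rows per matrix (assumption for illustration)
  const int64_t end = 10;  // exclusive flat row index where this shard stops
  // Ceiling division, as in matrix_band_part_op.cc: rows 8..9 fall in batch 2,
  // so the exclusive batch index is 3.
  const int64_t batch_end = (end + m - 1) / m;
  std::cout << "batch_end = " << batch_end << "\n";  // prints 3
  return 0;
}
```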
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | depthwise_conv_hybrid.h | 145 int batch_end = batches; in DepthwiseConvHybridGeneral() local
|