Searched refs:batch_end (results 1–25 of 38), sorted by relevance

/external/igt-gpu-tools/lib/
media_fill.c
140 uint32_t batch_end; in gen7_media_fillfunc() local
169 batch_end = intel_batchbuffer_align(batch, 8); in gen7_media_fillfunc()
170 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen7_media_fillfunc()
172 gen7_render_flush(batch, batch_end); in gen7_media_fillfunc()
184 uint32_t batch_end; in gen8_media_fillfunc() local
213 batch_end = intel_batchbuffer_align(batch, 8); in gen8_media_fillfunc()
214 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_media_fillfunc()
216 gen7_render_flush(batch, batch_end); in gen8_media_fillfunc()
229 uint32_t batch_end; in __gen9_media_fillfunc() local
269 batch_end = intel_batchbuffer_align(batch, 8); in __gen9_media_fillfunc()
[all …]
gpgpu_fill.c
127 uint32_t batch_end; in gen7_gpgpu_fillfunc() local
161 batch_end = intel_batchbuffer_align(batch, 8); in gen7_gpgpu_fillfunc()
162 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen7_gpgpu_fillfunc()
164 gen7_render_flush(batch, batch_end); in gen7_gpgpu_fillfunc()
176 uint32_t batch_end; in gen8_gpgpu_fillfunc() local
209 batch_end = intel_batchbuffer_align(batch, 8); in gen8_gpgpu_fillfunc()
210 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_gpgpu_fillfunc()
212 gen7_render_flush(batch, batch_end); in gen8_gpgpu_fillfunc()
225 uint32_t batch_end; in __gen9_gpgpu_fillfunc() local
259 batch_end = intel_batchbuffer_align(batch, 8); in __gen9_gpgpu_fillfunc()
[all …]
media_spin.c
87 uint32_t batch_end; in gen8_media_spinfunc() local
115 batch_end = intel_batchbuffer_align(batch, 8); in gen8_media_spinfunc()
116 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_media_spinfunc()
118 gen7_render_flush(batch, batch_end); in gen8_media_spinfunc()
127 uint32_t batch_end; in gen9_media_spinfunc() local
167 batch_end = intel_batchbuffer_align(batch, 8); in gen9_media_spinfunc()
168 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen9_media_spinfunc()
170 gen7_render_flush(batch, batch_end); in gen9_media_spinfunc()
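
The hits in media_fill.c, gpgpu_fill.c and media_spin.c all follow the same sequence: the batch write offset is aligned to 8 bytes, the result is asserted to stay below BATCH_STATE_SPLIT, and the aligned size is passed to gen7_render_flush as batch_end. Below is a minimal, self-contained C++ sketch of that sequence; the struct, the constant value and the stubbed helpers are illustrative stand-ins, and only the call order and the batch_end arithmetic mirror the hits.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins: the real intel_batchbuffer, BATCH_STATE_SPLIT and
// gen7_render_flush live in igt-gpu-tools; only the call sequence and the
// batch_end arithmetic below mirror the search hits.
constexpr uint32_t BATCH_STATE_SPLIT = 2048;  // arbitrary split point for this sketch

struct intel_batchbuffer {
    uint8_t buffer[4096];
    uint8_t *ptr = buffer;  // write cursor, advanced as commands are emitted
};

// Pad the command stream to `align` bytes and return the used size (batch_end).
static uint32_t intel_batchbuffer_align(intel_batchbuffer *batch, uint32_t align) {
    uint32_t used = static_cast<uint32_t>(batch->ptr - batch->buffer);
    uint32_t aligned = (used + align - 1) & ~(align - 1);
    while (used++ < aligned)
        *batch->ptr++ = 0;  // zero padding up to the aligned size
    return aligned;
}

// Stub: the real helper submits the first batch_end bytes of the batch to the GPU.
static void gen7_render_flush(intel_batchbuffer *batch, uint32_t batch_end) {
    std::printf("submitting %u bytes from %p\n", batch_end,
                static_cast<void *>(batch->buffer));
}

int main() {
    intel_batchbuffer batch;
    batch.ptr += 13;                                  // pretend some commands were emitted
    uint32_t batch_end = intel_batchbuffer_align(&batch, 8);
    assert(batch_end < BATCH_STATE_SPLIT);            // commands must fit below the split
    gen7_render_flush(&batch, batch_end);
    return 0;
}
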
rendercopy_gen7.c
37 drm_intel_context *context, uint32_t batch_end) in gen7_render_flush() argument
44 batch_end, 0); in gen7_render_flush()
514 uint32_t batch_end; in gen7_render_copyfunc() local
572 batch_end = batch->ptr - batch->buffer; in gen7_render_copyfunc()
573 batch_end = ALIGN(batch_end, 8); in gen7_render_copyfunc()
574 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen7_render_copyfunc()
576 gen7_render_flush(batch, context, batch_end); in gen7_render_copyfunc()
rendercopy_gen6.c
63 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush() argument
70 batch_end, 0); in gen6_render_flush()
532 uint32_t batch_end; in gen6_render_copyfunc() local
575 batch_end = intel_batchbuffer_align(batch, 8); in gen6_render_copyfunc()
592 gen6_render_flush(batch, context, batch_end); in gen6_render_copyfunc()
gpu_cmds.h
41 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
44 gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
rendercopy_gen8.c
133 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush() argument
140 batch_end, 0); in gen6_render_flush()
911 uint32_t batch_end; in gen8_render_copyfunc() local
1001 batch_end = intel_batchbuffer_align(batch, 8); in gen8_render_copyfunc()
1002 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_render_copyfunc()
1003 annotation_add_batch(&aub_annotations, batch_end); in gen8_render_copyfunc()
1009 gen6_render_flush(batch, context, batch_end); in gen8_render_copyfunc()
rendercopy_gen9.c
164 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush() argument
171 batch_end, 0); in gen6_render_flush()
969 uint32_t batch_end; in _gen9_render_copyfunc() local
1057 batch_end = intel_batchbuffer_align(batch, 8); in _gen9_render_copyfunc()
1058 assert(batch_end < BATCH_STATE_SPLIT); in _gen9_render_copyfunc()
1059 annotation_add_batch(&aub_annotations, batch_end); in _gen9_render_copyfunc()
1065 gen6_render_flush(batch, context, batch_end); in _gen9_render_copyfunc()
rendercopy_gen4.c
125 drm_intel_context *context, uint32_t batch_end) in gen4_render_flush() argument
132 batch_end, 0); in gen4_render_flush()
659 uint32_t offset, batch_end; in gen4_render_copyfunc() local
699 batch_end = intel_batchbuffer_align(batch, 8); in gen4_render_copyfunc()
716 gen4_render_flush(batch, context, batch_end); in gen4_render_copyfunc()
gpu_cmds.c
28 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end) in gen7_render_flush() argument
34 ret = drm_intel_bo_mrb_exec(batch->bo, batch_end, in gen7_render_flush()
40 gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end) in gen7_render_context_flush() argument
47 batch_end, 0); in gen7_render_context_flush()
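
The rendercopy_*.c flush helpers and gpu_cmds.c show the receiving end: batch_end is forwarded as the number of valid command bytes to execute (gpu_cmds.c passes it to drm_intel_bo_mrb_exec right after the buffer object). A stubbed sketch of that flow, with the buffer object and the exec call mocked so the example stays self-contained:

#include <cstdint>
#include <cstdio>

// Mock stand-ins for the libdrm buffer object and exec call; in gpu_cmds.c the
// real call is drm_intel_bo_mrb_exec(batch->bo, batch_end, ...).
struct mock_bo { int handle; };
struct mock_batch { mock_bo *bo; };

static int mock_bo_exec(mock_bo *bo, uint32_t used) {
    std::printf("exec bo %d with %u bytes of commands\n", bo->handle, used);
    return 0;
}

// batch_end is simply the byte count of emitted commands handed to the kernel.
static int render_flush_sketch(mock_batch *batch, uint32_t batch_end) {
    return mock_bo_exec(batch->bo, batch_end);
}

int main() {
    mock_bo bo{42};
    mock_batch batch{&bo};
    return render_flush_sketch(&batch, 64);
}
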
/external/tensorflow/tensorflow/lite/kernels/
unidirectional_sequence_rnn_test.cc
288 float* batch_end = batch_start + input_sequence_size; in TEST() local
289 rnn.SetInput(0, batch_start, batch_end); in TEST()
290 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST()
317 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
318 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
319 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
344 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
345 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
346 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
370 float* batch_end = batch_start + rnn.input_size(); in TEST() local
[all …]
bidirectional_sequence_rnn_test.cc
879 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
880 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
881 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
930 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
932 rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end); in TEST_P()
933 rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end); in TEST_P()
974 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
975 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
976 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
1018 float* batch_end = batch_start + rnn.input_size(); in TEST() local
[all …]
basic_rnn_test.cc
270 float* batch_end = batch_start + rnn.input_size(); in TEST() local
271 rnn.SetInput(0, batch_start, batch_end); in TEST()
272 rnn.SetInput(rnn.input_size(), batch_start, batch_end); in TEST()
299 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
300 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
301 rnn.SetInput(rnn.input_size(), batch_start, batch_end); in TEST_P()
327 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
328 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
329 rnn.SetInput(rnn.input_size(), batch_start, batch_end); in TEST_P()
svdf_test.cc
249 float* batch_end = batch_start + svdf_input_size * svdf_num_batches; in VerifyGoldens() local
250 svdf->SetInput(0, batch_start, batch_end); in VerifyGoldens()
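
In the TFLite kernel tests above (and in lstm_full_test.cc further down), batch_end is a past-the-end pointer: each test computes it as batch_start plus the batch or sequence length and hands the half-open range [batch_start, batch_end) to SetInput. A short sketch of that idiom; set_input is a hypothetical stand-in for the test harness helper and simply copies the range into an input buffer at the given offset.

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the SetInput helper used by the tests: copies the
// half-open range [batch_start, batch_end) into the input buffer at `offset`.
static void set_input(std::vector<float> &input, int offset,
                      const float *batch_start, const float *batch_end) {
    std::copy(batch_start, batch_end, input.begin() + offset);
}

int main() {
    const int input_sequence_size = 4;
    const float golden_input[] = {0.1f, 0.2f, 0.3f, 0.4f};
    std::vector<float> input(2 * input_sequence_size, 0.0f);

    const float *batch_start = golden_input;
    const float *batch_end = batch_start + input_sequence_size;     // one past the last element
    set_input(input, 0, batch_start, batch_end);                    // first batch
    set_input(input, input_sequence_size, batch_start, batch_end);  // second batch, same data

    for (float v : input)
        std::printf("%.1f ", v);
    std::printf("\n");
    return 0;
}
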
/external/tensorflow/tensorflow/core/kernels/sparse/
sparse_cholesky_op.cc
112 [&](int64 batch_begin, int64 batch_end) { in Compute() argument
113 for (int64 batch_index = batch_begin; batch_index < batch_end; in Compute()
191 [&](int64 batch_begin, int64 batch_end) { in Compute() argument
192 for (int64 batch_index = batch_begin; batch_index < batch_end; in Compute()
sparse_ordering_amd_op.cc
99 amd_cost_per_batch, [&](int64 batch_begin, int64 batch_end) { in Compute() argument
100 for (int64 batch_index = batch_begin; batch_index < batch_end; in Compute()
csr_sparse_matrix_to_sparse_tensor_op.cc
99 auto shard = [&](int64 batch_begin, int64 batch_end) { in Compute() argument
100 for (int64 batch_idx = batch_begin; batch_idx < batch_end; ++batch_idx) { in Compute()
csr_sparse_matrix_to_dense_op.cc
95 auto shard = [&](int64 batch_begin, int64 batch_end) { in Compute() argument
96 for (int64 batch_idx = batch_begin; batch_idx < batch_end; ++batch_idx) { in Compute()
sparse_mat_mul_op.cc
186 matmul_cost_per_batch, [&](int64 batch_begin, int64 batch_end) { in Compute() argument
187 for (int64 batch_idx = batch_begin; batch_idx < batch_end; in Compute()
228 [&](int64 batch_begin, int64 batch_end) { in Compute() argument
229 for (int64 batch_idx = batch_begin; batch_idx < batch_end; in Compute()
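
In these sparse-matrix ops, batch_end is the exclusive upper bound of a work shard: each kernel supplies a [&](int64 batch_begin, int64 batch_end) callback (preceded by a per-batch cost estimate such as amd_cost_per_batch or matmul_cost_per_batch) and loops over its slice of batches. The sketch below keeps only that callback shape; the serial shard() helper is a made-up stand-in for the real cost-driven parallel-for machinery.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <functional>

using int64 = std::int64_t;

// Made-up serial stand-in for the real work sharder: splits [0, total) into
// fixed-size shards and invokes the callback with half-open [begin, end) ranges.
static void shard(int64 total, int64 shard_size,
                  const std::function<void(int64, int64)> &work) {
    for (int64 begin = 0; begin < total; begin += shard_size)
        work(begin, std::min(begin + shard_size, total));
}

int main() {
    const int64 num_batches = 10;
    shard(num_batches, 3, [&](int64 batch_begin, int64 batch_end) {
        for (int64 batch_index = batch_begin; batch_index < batch_end; ++batch_index)
            std::printf("processing batch %lld\n",
                        static_cast<long long>(batch_index));
    });
    return 0;
}
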
/external/tensorflow/tensorflow/python/keras/engine/
training_utils_v1_test.py
265 …def wrapped(batch_element, batch_start, batch_end, is_finished): # pylint: disable=unused-argument argument
320 batch_end = batch_start + batch.shape[0]
321 aggregator.aggregate(batch, batch_start, batch_end)
322 batch_start = batch_end
346 batch_end = batch_start + batch[0].shape[0]
347 aggregator.aggregate(batch, batch_start, batch_end)
348 batch_start = batch_end
training_utils_v1.py
105 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
143 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
148 self.results[0] += batch_outs[0] * (batch_end - batch_start)
286 def aggregate(self, batch_element, batch_start=None, batch_end=None): argument
377 def aggregate(self, batch_element, batch_start, batch_end): argument
383 if batch_end - batch_start == self.num_samples:
397 self.results[batch_start:batch_end] = batch_element
402 args=(batch_element, batch_start, batch_end, is_finished))
405 def _slice_assign(self, batch_element, batch_start, batch_end, is_finished): argument
408 self.results[batch_start:batch_end] = batch_element
[all …]
training_arrays_v1.py
353 for batch_index, (batch_start, batch_end) in enumerate(batches):
354 batch_ids = index_array[batch_start:batch_end]
391 aggregator.aggregate(batch_outs, batch_start, batch_end)
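
The Keras hits show the two aggregation styles that consume these bounds: training_utils_v1.py either accumulates a weighted scalar (results[0] += batch_outs[0] * (batch_end - batch_start)) or slice-assigns per-sample outputs (results[batch_start:batch_end] = batch_element), while training_arrays_v1.py produces the (batch_start, batch_end) pairs and slices index_array with them. For example, a batch covering samples 64 to 96 contributes batch_outs[0] * 32 to the running metric and writes its 32 per-sample outputs into results[64:96].
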
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
lstm_full_test.cc
243 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
244 lstm->SetInput(b * num_inputs, batch_start, batch_end); in VerifyGoldens()
254 const float* batch_end = batch_start + num_outputs; in VerifyGoldens() local
255 expected.insert(expected.end(), batch_start, batch_end); in VerifyGoldens()
/external/tensorflow/tensorflow/core/kernels/linalg/
matrix_band_part_op.cc
156 const int64 batch_end = (end + m - 1) / m; in operator ()() local
157 for (int64 batch = batch_begin; batch < batch_end; ++batch) { in operator ()()
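
Here matrix_band_part_op.cc converts a flat element bound into a batch bound with a ceiling division: with m elements per matrix, batch_end = (end + m - 1) / m. For instance, end = 25 and m = 10 gives batch_end = (25 + 10 - 1) / 10 = 3, so the loop's exclusive upper bound covers batches up to index 2.
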
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
depthwise_conv_hybrid.h
145 int batch_end = batches; in DepthwiseConvHybridGeneral() local
155 batch_end = thread_end; in DepthwiseConvHybridGeneral()
170 for (int b = batch_start; b < batch_end; ++b) { in DepthwiseConvHybridGeneral()
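
depthwise_conv_hybrid.h uses the same half-open convention to split batches across threads: batch_end starts as the full batch count, is overwritten with the thread's end index when the work is partitioned, and the main loop runs for (int b = batch_start; b < batch_end; ++b). A small sketch of that partitioning with the thread bookkeeping reduced to plain arithmetic (the even split below is illustrative, not the kernel's actual heuristic):

#include <cstdio>

// Illustrative even split: compute the half-open [batch_start, batch_end)
// range of batches that one thread should process.
static void thread_batch_range(int batches, int num_threads, int thread_index,
                               int *batch_start, int *batch_end) {
    int per_thread = (batches + num_threads - 1) / num_threads;  // ceiling division
    *batch_start = thread_index * per_thread;
    int thread_end = *batch_start + per_thread;
    *batch_end = thread_end < batches ? thread_end : batches;    // clamp to the last batch
}

int main() {
    const int batches = 10, num_threads = 3;
    for (int t = 0; t < num_threads; ++t) {
        int batch_start, batch_end;
        thread_batch_range(batches, num_threads, t, &batch_start, &batch_end);
        for (int b = batch_start; b < batch_end; ++b)
            std::printf("thread %d handles batch %d\n", t, b);
    }
    return 0;
}
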
