
Searched refs:batch_end (Results 1 – 25 of 50) sorted by relevance

/external/igt-gpu-tools/lib/
media_fill.c
140 uint32_t batch_end; in gen7_media_fillfunc() local
169 batch_end = intel_batchbuffer_align(batch, 8); in gen7_media_fillfunc()
170 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen7_media_fillfunc()
172 gen7_render_flush(batch, batch_end); in gen7_media_fillfunc()
184 uint32_t batch_end; in gen8_media_fillfunc() local
213 batch_end = intel_batchbuffer_align(batch, 8); in gen8_media_fillfunc()
214 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_media_fillfunc()
216 gen7_render_flush(batch, batch_end); in gen8_media_fillfunc()
229 uint32_t batch_end; in __gen9_media_fillfunc() local
269 batch_end = intel_batchbuffer_align(batch, 8); in __gen9_media_fillfunc()
[all …]
gpgpu_fill.c
127 uint32_t batch_end; in gen7_gpgpu_fillfunc() local
161 batch_end = intel_batchbuffer_align(batch, 8); in gen7_gpgpu_fillfunc()
162 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen7_gpgpu_fillfunc()
164 gen7_render_flush(batch, batch_end); in gen7_gpgpu_fillfunc()
176 uint32_t batch_end; in gen8_gpgpu_fillfunc() local
209 batch_end = intel_batchbuffer_align(batch, 8); in gen8_gpgpu_fillfunc()
210 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_gpgpu_fillfunc()
212 gen7_render_flush(batch, batch_end); in gen8_gpgpu_fillfunc()
225 uint32_t batch_end; in __gen9_gpgpu_fillfunc() local
259 batch_end = intel_batchbuffer_align(batch, 8); in __gen9_gpgpu_fillfunc()
[all …]
media_spin.c
87 uint32_t batch_end; in gen8_media_spinfunc() local
115 batch_end = intel_batchbuffer_align(batch, 8); in gen8_media_spinfunc()
116 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_media_spinfunc()
118 gen7_render_flush(batch, batch_end); in gen8_media_spinfunc()
127 uint32_t batch_end; in gen9_media_spinfunc() local
167 batch_end = intel_batchbuffer_align(batch, 8); in gen9_media_spinfunc()
168 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen9_media_spinfunc()
170 gen7_render_flush(batch, batch_end); in gen9_media_spinfunc()
rendercopy_gen7.c
37 drm_intel_context *context, uint32_t batch_end) in gen7_render_flush() argument
44 batch_end, 0); in gen7_render_flush()
514 uint32_t batch_end; in gen7_render_copyfunc() local
572 batch_end = batch->ptr - batch->buffer; in gen7_render_copyfunc()
573 batch_end = ALIGN(batch_end, 8); in gen7_render_copyfunc()
574 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen7_render_copyfunc()
576 gen7_render_flush(batch, context, batch_end); in gen7_render_copyfunc()
rendercopy_gen6.c
63 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush() argument
70 batch_end, 0); in gen6_render_flush()
532 uint32_t batch_end; in gen6_render_copyfunc() local
575 batch_end = intel_batchbuffer_align(batch, 8); in gen6_render_copyfunc()
592 gen6_render_flush(batch, context, batch_end); in gen6_render_copyfunc()
gpu_cmds.h
41 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
44 gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
rendercopy_gen8.c
133 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush() argument
140 batch_end, 0); in gen6_render_flush()
911 uint32_t batch_end; in gen8_render_copyfunc() local
1001 batch_end = intel_batchbuffer_align(batch, 8); in gen8_render_copyfunc()
1002 igt_assert(batch_end < BATCH_STATE_SPLIT); in gen8_render_copyfunc()
1003 annotation_add_batch(&aub_annotations, batch_end); in gen8_render_copyfunc()
1009 gen6_render_flush(batch, context, batch_end); in gen8_render_copyfunc()
rendercopy_gen9.c
164 drm_intel_context *context, uint32_t batch_end) in gen6_render_flush() argument
171 batch_end, 0); in gen6_render_flush()
969 uint32_t batch_end; in _gen9_render_copyfunc() local
1057 batch_end = intel_batchbuffer_align(batch, 8); in _gen9_render_copyfunc()
1058 assert(batch_end < BATCH_STATE_SPLIT); in _gen9_render_copyfunc()
1059 annotation_add_batch(&aub_annotations, batch_end); in _gen9_render_copyfunc()
1065 gen6_render_flush(batch, context, batch_end); in _gen9_render_copyfunc()
rendercopy_gen4.c
125 drm_intel_context *context, uint32_t batch_end) in gen4_render_flush() argument
132 batch_end, 0); in gen4_render_flush()
659 uint32_t offset, batch_end; in gen4_render_copyfunc() local
699 batch_end = intel_batchbuffer_align(batch, 8); in gen4_render_copyfunc()
716 gen4_render_flush(batch, context, batch_end); in gen4_render_copyfunc()
gpu_cmds.c
28 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end) in gen7_render_flush() argument
34 ret = drm_intel_bo_mrb_exec(batch->bo, batch_end, in gen7_render_flush()
40 gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end) in gen7_render_context_flush() argument
47 batch_end, 0); in gen7_render_context_flush()
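
Every igt-gpu-tools hit above ends with the same three-step tail: pad the command stream to an 8-byte boundary, check that the resulting size (batch_end) stays below BATCH_STATE_SPLIT so the commands do not overrun the state half of the batch buffer, and hand that byte count to the flush helper, which passes it to drm_intel_bo_mrb_exec as the execution length. A condensed sketch of that shared tail, with the actual command emission elided and header names assumed from the lib/ layout shown above:

    #include <stdint.h>
    #include "igt.h"                 /* igt_assert(); header split assumed */
    #include "intel_batchbuffer.h"   /* struct intel_batchbuffer, BATCH_STATE_SPLIT */
    #include "gpu_cmds.h"            /* gen7_render_flush() */

    static void fill_tail(struct intel_batchbuffer *batch)
    {
            uint32_t batch_end;

            /* ... MEDIA_* / GPGPU_* / render commands emitted into batch->ptr ... */

            /* Pad to an 8-byte boundary and record the command stream size. */
            batch_end = intel_batchbuffer_align(batch, 8);

            /* Commands must stop before the state half of the buffer. */
            igt_assert(batch_end < BATCH_STATE_SPLIT);

            /* Execute exactly batch_end bytes (wraps drm_intel_bo_mrb_exec). */
            gen7_render_flush(batch, batch_end);
    }
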
/external/tensorflow/tensorflow/lite/kernels/
unidirectional_sequence_rnn_test.cc
288 float* batch_end = batch_start + input_sequence_size; in TEST() local
289 rnn.SetInput(0, batch_start, batch_end); in TEST()
290 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST()
317 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
318 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
319 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
344 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
345 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
346 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
370 float* batch_end = batch_start + rnn.input_size(); in TEST() local
[all …]
bidirectional_sequence_rnn_test.cc
879 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
880 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
881 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
930 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
932 rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end); in TEST_P()
933 rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end); in TEST_P()
974 float* batch_end = batch_start + input_sequence_size; in TEST_P() local
975 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
976 rnn.SetInput(input_sequence_size, batch_start, batch_end); in TEST_P()
1018 float* batch_end = batch_start + rnn.input_size(); in TEST() local
[all …]
basic_rnn_test.cc
270 float* batch_end = batch_start + rnn.input_size(); in TEST() local
271 rnn.SetInput(0, batch_start, batch_end); in TEST()
272 rnn.SetInput(rnn.input_size(), batch_start, batch_end); in TEST()
299 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
300 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
301 rnn.SetInput(rnn.input_size(), batch_start, batch_end); in TEST_P()
327 float* batch_end = batch_start + rnn.input_size(); in TEST_P() local
328 rnn.SetInput(0, batch_start, batch_end); in TEST_P()
329 rnn.SetInput(rnn.input_size(), batch_start, batch_end); in TEST_P()
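
In all three TFLite RNN test files, batch_end is not an index but a pointer one past the last element of the current input batch; SetInput then copies the half-open range [batch_start, batch_end) into the model input at the given offset. A self-contained illustration of that half-open pointer-range convention, where PopulateSlice is a hypothetical stand-in for the test harness's SetInput:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Hypothetical stand-in for the test harness's SetInput(offset, begin, end):
    // copies the half-open range [begin, end) into `input` starting at `offset`.
    void PopulateSlice(std::vector<float>& input, size_t offset,
                       const float* begin, const float* end) {
      assert(offset + (end - begin) <= input.size());
      std::copy(begin, end, input.begin() + offset);
    }

    int main() {
      const size_t input_sequence_size = 4;
      std::vector<float> sequence = {0.1f, 0.2f, 0.3f, 0.4f};
      std::vector<float> model_input(2 * input_sequence_size, 0.0f);

      const float* batch_start = sequence.data();
      const float* batch_end = batch_start + input_sequence_size;  // one past the end

      // Mirror the tests above: feed the same sequence at two offsets.
      PopulateSlice(model_input, 0, batch_start, batch_end);
      PopulateSlice(model_input, input_sequence_size, batch_start, batch_end);
      return 0;
    }
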
/external/mesa3d/src/gallium/drivers/d3d12/
d3d12_resource_state.cpp
194 d3d12_resource_state_cleanup(&entry->batch_end); in d3d12_destroy_context_state_table_entry()
241 d3d12_resource_state_init(&bo_state->batch_end, subresource_count, supports_simultaneous_access); in init_state_table_entry()
345 if (!bo_state->batch_end.supports_simultaneous_access) { in context_state_resolve_submission()
350 copy_resource_state(&bo_state->batch_begin, &bo_state->batch_end); in context_state_resolve_submission()
351 copy_resource_state(&bo->global_state, &bo_state->batch_end); in context_state_resolve_submission()
353 reset_resource_state(&bo_state->batch_end); in context_state_resolve_submission()
401 d3d12_resource_state *current_state = &state_entry->batch_end; in append_barrier()
495 } else if (state_entry->batch_end.homogenous) { in d3d12_transition_resource_state()
498 for (unsigned i = 0; i < state_entry->batch_end.num_subresources; ++i) { in d3d12_transition_resource_state()
520 …bool is_whole_resource = num_levels * num_layers * num_planes == state_entry->batch_end.num_subres… in d3d12_transition_subresources_state()
[all …]
d3d12_resource_state.h
83 struct d3d12_resource_state batch_begin, batch_end; member
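
In the d3d12 gallium driver batch_end is neither a pointer nor a count: each buffer object's per-context entry pairs two d3d12_resource_state values, batch_begin (the state the resource is expected to be in when the batch starts executing) and batch_end (the state the recorded commands leave it in). At submission, context_state_resolve_submission folds batch_end back into batch_begin and the BO's global state, or simply resets it for simultaneously accessible resources. A heavily simplified, hypothetical sketch of that bookkeeping, inferred only from the hits above (the real struct tracks per-subresource D3D12 states and promotion/decay):

    #include <cstdint>

    // Hypothetical, simplified stand-in for the driver's per-resource state
    // (the real d3d12_resource_state tracks one D3D12 state per subresource).
    struct resource_state {
      uint32_t d3d12_states = 0;               // bitmask of D3D12_RESOURCE_STATE_* flags
      bool supports_simultaneous_access = false;
    };

    struct bo_context_state {
      resource_state batch_begin;  // state the resource must be in when the batch starts
      resource_state batch_end;    // state the recorded commands leave it in
    };

    // Rough shape of context_state_resolve_submission(): the state a resource ends
    // the batch in becomes the next batch's starting state and the BO's global state.
    void resolve_submission(bo_context_state& bo_state, resource_state& global_state) {
      if (!bo_state.batch_end.supports_simultaneous_access) {
        bo_state.batch_begin = bo_state.batch_end;
        global_state = bo_state.batch_end;
      } else {
        bo_state.batch_end.d3d12_states = 0;  // reset; such resources decay between batches
      }
    }
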
/external/tensorflow/tensorflow/core/kernels/sparse/
sparse_cholesky_op.cc
114 [&](int64_t batch_begin, int64_t batch_end) { in Compute() argument
115 for (int64_t batch_index = batch_begin; batch_index < batch_end; in Compute()
193 [&](int64_t batch_begin, int64_t batch_end) { in Compute() argument
194 for (int64_t batch_index = batch_begin; batch_index < batch_end; in Compute()
sparse_ordering_amd_op.cc
99 amd_cost_per_batch, [&](int64_t batch_begin, int64_t batch_end) { in Compute() argument
100 for (int64_t batch_index = batch_begin; batch_index < batch_end; in Compute()
csr_sparse_matrix_to_sparse_tensor_op.cc
99 auto shard = [&](int64_t batch_begin, int64_t batch_end) { in Compute() argument
100 for (int64_t batch_idx = batch_begin; batch_idx < batch_end; in Compute()
csr_sparse_matrix_to_dense_op.cc
95 auto shard = [&](int64_t batch_begin, int64_t batch_end) { in Compute() argument
96 for (int64_t batch_idx = batch_begin; batch_idx < batch_end; in Compute()
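
The four sparse-kernel hits share one idiom: the per-batch work is wrapped in a closure taking a half-open index range [batch_begin, batch_end), handed to TensorFlow's sharding helper together with a per-batch cost estimate, and the closure loops batch_index over that range. A self-contained sketch of the idiom using std::thread in place of TensorFlow's thread pool; parallel_for_batches is an illustrative helper, not the library API:

    #include <algorithm>
    #include <cstdint>
    #include <thread>
    #include <vector>

    // Run work(batch_begin, batch_end) over [0, num_batches), split across threads.
    // Illustrative replacement for the Shard()-style helper used by the kernels above.
    template <typename Fn>
    void parallel_for_batches(int64_t num_batches, int num_threads, Fn work) {
      std::vector<std::thread> threads;
      const int64_t per_thread = (num_batches + num_threads - 1) / num_threads;
      for (int t = 0; t < num_threads; ++t) {
        const int64_t batch_begin = t * per_thread;
        const int64_t batch_end = std::min(batch_begin + per_thread, num_batches);
        if (batch_begin >= batch_end) break;
        threads.emplace_back([=] { work(batch_begin, batch_end); });
      }
      for (auto& th : threads) th.join();
    }

    int main() {
      std::vector<double> result(16, 0.0);
      parallel_for_batches(16, 4, [&](int64_t batch_begin, int64_t batch_end) {
        for (int64_t batch_index = batch_begin; batch_index < batch_end; ++batch_index)
          result[batch_index] = 2.0 * batch_index;  // per-batch work goes here
      });
      return 0;
    }
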
/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/
gemm_interleaved_pretransposed_2d.hpp
189 unsigned int batch_end = m_end / window_per_batch; in execute_pretranspose() local
193 unsigned int m_max = (m_end - (batch_end * window_per_batch)) * strategy::out_height(); in execute_pretranspose()
233 for (unsigned int batch = batch_0; batch <= batch_end; batch++) { in execute_pretranspose()
235 unsigned int last_m = (batch == batch_end) ? m_max : _Msize; in execute_pretranspose()
259 for (unsigned int batch = batch_0; batch <= batch_end; batch++) { in execute_pretranspose()
261 unsigned int last_m = (batch == batch_end) ? m_max : _Msize; in execute_pretranspose()
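
The arm_gemm hit uses batch_end to undo a flattening: the scheduler's window counts (batch, M-block) pairs, so dividing the window end by window_per_batch recovers the last batch index, the remainder (times the strategy's out_height) gives how many rows of that final batch are covered, and every earlier batch is walked over the full _Msize. A small sketch of just that decomposition, with the surrounding GEMM machinery (including the matching m_start handling) omitted and names mirroring the hit above:

    #include <cstdio>

    // Decompose a flattened window bound m_end back into per-batch row ranges.
    // Each batch owns window_per_batch window entries; each entry covers out_height rows.
    void walk_batches(unsigned batch_0, unsigned m_end, unsigned window_per_batch,
                      unsigned out_height, unsigned Msize) {
      const unsigned batch_end = m_end / window_per_batch;
      const unsigned m_max = (m_end - (batch_end * window_per_batch)) * out_height;

      for (unsigned batch = batch_0; batch <= batch_end; batch++) {
        // Full batches cover all Msize rows; the final, partial batch stops at m_max.
        const unsigned last_m = (batch == batch_end) ? m_max : Msize;
        std::printf("batch %u: rows [0, %u)\n", batch, last_m);
      }
    }

    int main() {
      // e.g. 3 window entries per batch, 8 rows per entry, 24 rows per full batch.
      walk_batches(/*batch_0=*/0, /*m_end=*/7, /*window_per_batch=*/3,
                   /*out_height=*/8, /*Msize=*/24);
      return 0;
    }
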
/external/pytorch/aten/src/ATen/native/cuda/
Embedding.cu
72 int batch_end = batch_start + blockDim.x*blockDim.y < n ? in embedding_backward_feature_kernel() local
76 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) in embedding_backward_feature_kernel()
82 int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? in embedding_backward_feature_kernel()
83 (batch_end - chunk_start) : blockDim.y; in embedding_backward_feature_kernel()
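
The CUDA embedding-backward kernel clamps batch_end: each thread block claims a slab of at most blockDim.x*blockDim.y input indices starting at batch_start, cut short at n, then walks the slab in chunks of blockDim.y, where only the last chunk can be partial. The same bounds arithmetic in plain host-side C++, with block_dim_x and block_dim_y standing in for the CUDA launch dimensions:

    #include <algorithm>
    #include <cstdio>

    // Host-side illustration of the slab/chunk bounds the kernel computes per block.
    void show_chunks(int batch_start, int n, int block_dim_x, int block_dim_y) {
      // A block handles at most block_dim_x * block_dim_y indices, clamped to n.
      const int batch_end = std::min(batch_start + block_dim_x * block_dim_y, n);

      for (int chunk_start = batch_start; chunk_start < batch_end;
           chunk_start += block_dim_y) {
        // The final chunk may hold fewer than block_dim_y indices.
        const int n_this_chunk = std::min(batch_end - chunk_start, block_dim_y);
        std::printf("chunk [%d, %d)\n", chunk_start, chunk_start + n_this_chunk);
      }
    }

    int main() {
      show_chunks(/*batch_start=*/96, /*n=*/110, /*block_dim_x=*/32, /*block_dim_y=*/4);
      return 0;
    }
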
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/kernels/
lstm_full_test.cc
243 const float* batch_end = batch_start + num_inputs; in VerifyGoldens() local
244 lstm->SetInput(b * num_inputs, batch_start, batch_end); in VerifyGoldens()
254 const float* batch_end = batch_start + num_outputs; in VerifyGoldens() local
255 expected.insert(expected.end(), batch_start, batch_end); in VerifyGoldens()
/external/tensorflow/tensorflow/python/keras/engine/
training_utils_v1.py
99 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
137 def aggregate(self, batch_outs, batch_start=None, batch_end=None): argument
142 self.results[0] += batch_outs[0] * (batch_end - batch_start)
280 def aggregate(self, batch_element, batch_start=None, batch_end=None): argument
371 def aggregate(self, batch_element, batch_start, batch_end): argument
377 if batch_end - batch_start == self.num_samples:
391 self.results[batch_start:batch_end] = batch_element
396 args=(batch_element, batch_start, batch_end, is_finished))
399 def _slice_assign(self, batch_element, batch_start, batch_end, is_finished): argument
402 self.results[batch_start:batch_end] = batch_element
[all …]
/external/tensorflow/tensorflow/core/kernels/linalg/
matrix_band_part_op.cc
156 const int64_t batch_end = (end + m - 1) / m; in operator ()() local
157 for (int64_t batch = batch_begin; batch < batch_end; ++batch) { in operator ()()
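
The matrix_band_part hit shows the ceiling-division form of batch_end: the shard range [begin, end) is expressed in flattened rows, with m rows per matrix, so the index of the last (possibly partially covered) matrix is (end + m - 1) / m, taken as an exclusive bound. A tiny worked example of that arithmetic; the matching batch_begin = begin / m is an assumption, since only batch_end appears in the hit:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t m = 4;                 // rows per matrix
      const int64_t begin = 6, end = 14;   // shard range, in flattened rows

      const int64_t batch_begin = begin / m;        // first matrix touched      -> 1
      const int64_t batch_end = (end + m - 1) / m;  // ceil(end / m), exclusive  -> 4

      for (int64_t batch = batch_begin; batch < batch_end; ++batch)
        std::printf("matrix %lld overlaps rows [%lld, %lld)\n",
                    static_cast<long long>(batch),
                    static_cast<long long>(batch * m),
                    static_cast<long long>((batch + 1) * m));
      return 0;
    }
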
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
depthwise_conv_hybrid.h
147 int batch_end = batches; in DepthwiseConvHybridGeneral() local
157 batch_end = thread_end; in DepthwiseConvHybridGeneral()
172 for (int b = batch_start; b < batch_end; ++b) { in DepthwiseConvHybridGeneral()
