/third_party/mesa3d/src/gallium/drivers/freedreno/ |
D | freedreno_batch_cache.c |
    154  struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};  in fd_bc_flush() local
    162  fd_batch_reference_locked(&batches[n++], batch);  in fd_bc_flush()
    175  if (batches[i] && (batches[i]->ctx == ctx) &&  in fd_bc_flush()
    176      (batches[i] != current_batch)) {  in fd_bc_flush()
    177  fd_batch_add_dep(current_batch, batches[i]);  in fd_bc_flush()
    188  fd_batch_flush(batches[i]);  in fd_bc_flush()
    193  fd_batch_reference(&batches[i], NULL);  in fd_bc_flush()
    221  struct fd_batch *batch, *batches[32] = {};  in fd_bc_flush_readers() local
    231  fd_batch_reference_locked(&batches[batch_count++], batch);  in fd_bc_flush_readers()
    235  fd_batch_flush(batches[i]);  in fd_bc_flush_readers()
    [all …]
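The fd_bc_flush() hits above trace a two-phase shape: take references to every cached batch into a stack-local array while the cache lock is held, then flush and release them with the lock dropped, so flushing cannot re-enter the locked cache. A minimal sketch of that shape; all names below are invented, not taken from freedreno:

```c
#include <pthread.h>
#include <stddef.h>

#define MAX_BATCHES 32

struct batch { int refcount; int dirty; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct batch *cache_batches[MAX_BATCHES];

static void batch_ref(struct batch *b)   { b->refcount++; }
static void batch_unref(struct batch *b) { b->refcount--; }
static void batch_flush(struct batch *b) { b->dirty = 0; }

void bc_flush_all(void)
{
   struct batch *snapshot[MAX_BATCHES] = {0};
   unsigned n = 0;

   /* Phase 1: snapshot references under the lock. */
   pthread_mutex_lock(&cache_lock);
   for (unsigned i = 0; i < MAX_BATCHES; i++) {
      if (cache_batches[i])
         batch_ref(snapshot[n++] = cache_batches[i]);
   }
   pthread_mutex_unlock(&cache_lock);

   /* Phase 2: flush and release without the lock held. */
   for (unsigned i = 0; i < n; i++) {
      batch_flush(snapshot[i]);
      batch_unref(snapshot[i]);
   }
}
```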
|
D | freedreno_batch_cache.h |
     53  struct fd_batch *batches[32];  member
     64  _m && ((batch) = (cache)->batches[u_bit_scan(&_m)]); _m &= (mask))
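Line 64 is the tail of a foreach-style macro driven by a bitmask of live slots. An illustrative reimplementation of that idiom follows; bit_scan() is a stand-in for Mesa's u_bit_scan(), and the trailing `_m &= (mask)` re-reads the mask each iteration so slots removed mid-loop are skipped:

```c
#include <strings.h>   /* ffs() */

struct fd_batch;

struct batch_cache {
   struct fd_batch *batches[32];
   unsigned batch_mask;   /* bit i set => batches[i] is live */
};

/* Like Mesa's u_bit_scan(): return the lowest set bit's index and
 * clear it. Caller must guarantee *mask != 0. */
static inline int bit_scan(unsigned *mask)
{
   int i = ffs(*mask) - 1;
   *mask &= ~(1u << i);
   return i;
}

#define foreach_batch(batch, cache, mask)                              \
   for (unsigned _m = (mask);                                          \
        _m && ((batch) = (cache)->batches[bit_scan(&_m)]); _m &= (mask))
```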
|
/third_party/mesa3d/src/gallium/drivers/d3d12/ |
D | d3d12_context.h |
    159  struct d3d12_batch batches[4];  member
    245  assert(ctx->current_batch_idx < ARRAY_SIZE(ctx->batches));  in d3d12_current_batch()
    246  return ctx->batches + ctx->current_batch_idx;  in d3d12_current_batch()
    250  unsigned oldest = (ctx->current_batch_idx + 1) % ARRAY_SIZE(ctx->batches); \
    251  while (ctx->batches[oldest].fence == NULL && oldest != ctx->current_batch_idx) \
    252  oldest = (oldest + 1) % ARRAY_SIZE(ctx->batches); \
    253  struct d3d12_batch *batch = &ctx->batches[oldest]; \
    255  oldest = (oldest + 1) % ARRAY_SIZE(ctx->batches), \
    256  batch = &ctx->batches[oldest])
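Lines 245-256 outline a fixed ring of in-flight batches: d3d12_current_batch() returns the slot being recorded, and a helper macro walks submitted batches oldest-first, skipping slots whose fence is still NULL (never submitted). A hedged sketch of the same control flow with simplified stand-in types:

```c
#include <assert.h>
#include <stddef.h>

#define NUM_BATCHES 4

struct batch { void *fence; /* NULL until the batch is submitted */ };

struct context {
   struct batch batches[NUM_BATCHES];
   unsigned current_batch_idx;
};

static struct batch *current_batch(struct context *ctx)
{
   assert(ctx->current_batch_idx < NUM_BATCHES);
   return &ctx->batches[ctx->current_batch_idx];
}

/* Visit submitted batches from oldest to newest, excluding current. */
static void foreach_submitted_batch(struct context *ctx,
                                    void (*fn)(struct batch *))
{
   /* The slot after current is the oldest candidate in the ring. */
   unsigned oldest = (ctx->current_batch_idx + 1) % NUM_BATCHES;
   while (ctx->batches[oldest].fence == NULL &&
          oldest != ctx->current_batch_idx)
      oldest = (oldest + 1) % NUM_BATCHES;

   for (; oldest != ctx->current_batch_idx;
        oldest = (oldest + 1) % NUM_BATCHES)
      fn(&ctx->batches[oldest]);
}
```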
|
/third_party/mesa3d/src/gallium/drivers/crocus/ |
D | crocus_pipe_control.c |
    299  struct crocus_batch *render_batch = &ice->batches[CROCUS_BATCH_RENDER];  in crocus_texture_barrier()
    300  struct crocus_batch *compute_batch = &ice->batches[CROCUS_BATCH_COMPUTE];  in crocus_texture_barrier()
    336  const struct intel_device_info *devinfo = &ice->batches[0].screen->devinfo;  in crocus_memory_barrier()
    363  if (ice->batches[i].contains_draw) {  in crocus_memory_barrier()
    364  crocus_batch_maybe_flush(&ice->batches[i], 24);  in crocus_memory_barrier()
    365  crocus_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",  in crocus_memory_barrier()
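The crocus_memory_barrier() hits (mirrored by iris_pipe_control.c further down) show the barrier loop: for each batch that contains draws, reserve command-buffer space, then emit a flushing PIPE_CONTROL. A reduced model with stub helpers standing in for the driver's own:

```c
enum { BATCH_RENDER, BATCH_COMPUTE, BATCH_COUNT };

struct batch { int contains_draw; };
struct ice_context { struct batch batches[BATCH_COUNT]; };

/* Stubs for the driver's real helpers. */
static void batch_maybe_flush(struct batch *b, unsigned dwords)
{ (void)b; (void)dwords; /* flush if < dwords of space remain */ }

static void emit_pipe_control_flush(struct batch *b, const char *reason,
                                    unsigned flags)
{ (void)b; (void)reason; (void)flags; /* record a PIPE_CONTROL packet */ }

void memory_barrier(struct ice_context *ice, unsigned flags)
{
   for (int i = 0; i < BATCH_COUNT; i++) {
      if (ice->batches[i].contains_draw) {
         /* Ensure room for the packet, then flush caches; "24" matches
          * the reservation visible at line 364 above. */
         batch_maybe_flush(&ice->batches[i], 24);
         emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
                                 flags);
      }
   }
}
```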
|
D | crocus_context.c |
    117  crocus_batch_check_for_reset(&ice->batches[i]);  in crocus_get_device_reset_status()
    205  crocus_batch_free(&ice->batches[CROCUS_BATCH_RENDER]);  in crocus_destroy_context()
    206  if (ice->batches[CROCUS_BATCH_COMPUTE].ice)  in crocus_destroy_context()
    207  crocus_batch_free(&ice->batches[CROCUS_BATCH_COMPUTE]);  in crocus_destroy_context()
    318  screen->vtbl.init_render_context(&ice->batches[CROCUS_BATCH_RENDER]);  in crocus_create_context()
    320  screen->vtbl.init_compute_context(&ice->batches[CROCUS_BATCH_COMPUTE]);  in crocus_create_context()
|
D | crocus_fence.c |
    220  crocus_batch_flush(&ice->batches[i]);  in crocus_fence_flush()
    236  struct crocus_batch *batch = &ice->batches[b];  in crocus_fence_flush()
    276  struct crocus_batch *batch = &ice->batches[b];  in crocus_fence_await()
    341  if (fine->syncobj == crocus_batch_get_signal_syncobj(&ice->batches[i]))  in crocus_fence_finish()
    342  crocus_batch_flush(&ice->batches[i]);  in crocus_fence_finish()
    550  ice->batches[b].contains_fence_signal = true;  in crocus_fence_signal()
    551  crocus_batch_add_syncobj(&ice->batches[b], fine->syncobj,  in crocus_fence_signal()
|
D | crocus_monitor.c |
    130  crocus_emit_pipe_control_flush(&ice->batches[CROCUS_BATCH_RENDER],
    142  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in crocus_monitor_emit_mi_report_perf_count()
    151  _crocus_batch_flush(&ice->batches[CROCUS_BATCH_RENDER], __FILE__, __LINE__);  in crocus_monitor_batchbuffer_flush()
    161  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
    170  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];
    319  ice->batches[CROCUS_BATCH_RENDER].hw_ctx_id,  in crocus_init_monitor_ctx()
    434  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in crocus_get_monitor_result()
|
D | crocus_query.c |
    155  struct crocus_batch *batch = &ice->batches[q->batch_idx];  in mark_available()
    192  struct crocus_batch *batch = &ice->batches[q->batch_idx];  in write_value()
    210  crocus_pipelined_write(&ice->batches[CROCUS_BATCH_RENDER], q,  in write_value()
    218  crocus_pipelined_write(&ice->batches[CROCUS_BATCH_RENDER], q,  in write_value()
    276  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in write_overflow_values()
    595  struct crocus_batch *batch = &ice->batches[q->batch_idx];  in crocus_end_query()
    668  struct crocus_batch *batch = &ice->batches[q->batch_idx];  in crocus_get_query_result()
    710  struct crocus_batch *batch = &ice->batches[q->batch_idx];  in crocus_get_query_result_resource()
    819  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in set_predicate_for_result()
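crocus_query.c (like iris_query.c below) resolves each query back to the batch it was recorded on through a stored batch_idx. A skeletal illustration of that association; only the batch_idx field name comes from the hits above, the rest is a stand-in:

```c
enum { BATCH_RENDER, BATCH_COMPUTE, BATCH_COUNT };

struct batch { unsigned pending_queries; };
struct ice_context { struct batch batches[BATCH_COUNT]; };

struct query {
   unsigned batch_idx;   /* index of the batch this query was recorded on */
};

/* What mark_available(), crocus_end_query(), etc. do on entry:
 * map the query back to its owning batch. */
static struct batch *query_batch(struct ice_context *ice, struct query *q)
{
   return &ice->batches[q->batch_idx];
}
```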
|
D | crocus_draw.c |
    272  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in crocus_indirect_draw_vbo()
    321  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in crocus_simple_draw_vbo()
    372  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_RENDER];  in crocus_draw_vbo()
    481  struct crocus_batch *batch = &ice->batches[CROCUS_BATCH_COMPUTE];  in crocus_launch_grid()
    496  crocus_predraw_resolve_inputs(ice, &ice->batches[CROCUS_BATCH_RENDER], NULL,  in crocus_launch_grid()
|
/third_party/mesa3d/src/mesa/main/ |
D | glthread.c |
     76  unsigned batch_index = batch - ctx->GLThread.batches;  in glthread_unmarshal_batch()
    120  glthread->batches[i].ctx = ctx;  in _mesa_glthread_init()
    121  util_queue_fence_init(&glthread->batches[i].fence);  in _mesa_glthread_init()
    123  glthread->next_batch = &glthread->batches[glthread->next];  in _mesa_glthread_init()
    171  util_queue_fence_destroy(&glthread->batches[i].fence);  in _mesa_glthread_destroy()
    256  glthread->next_batch = &glthread->batches[glthread->next];  in _mesa_glthread_flush_batch()
    281  struct glthread_batch *last = &glthread->batches[glthread->last];  in _mesa_glthread_finish()
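glthread.c shows application-side batching: a small ring of marshalling batches, each guarded by a completion fence, with next_batch pointing at the slot being filled and _mesa_glthread_flush_batch() advancing the ring. The sketch below models the fence with pthread primitives rather than Mesa's util_queue_fence; the batch count and the exact hand-off details are assumptions:

```c
#include <pthread.h>
#include <stdbool.h>

#define MARSHAL_BATCHES 2   /* assumption; the real count is Mesa's choice */

struct batch {
   pthread_mutex_t lock;
   pthread_cond_t  done_cond;
   bool            done;      /* fence: set when the worker finishes */
};

struct glthread {
   struct batch batches[MARSHAL_BATCHES];
   struct batch *next_batch;  /* slot the API thread is filling */
   unsigned next, last;
};

void glthread_init(struct glthread *g)
{
   for (unsigned i = 0; i < MARSHAL_BATCHES; i++) {
      pthread_mutex_init(&g->batches[i].lock, NULL);
      pthread_cond_init(&g->batches[i].done_cond, NULL);
      g->batches[i].done = true;   /* idle slots count as complete */
   }
   g->next = g->last = 0;
   g->next_batch = &g->batches[0];
}

static void fence_wait(struct batch *b)
{
   pthread_mutex_lock(&b->lock);
   while (!b->done)
      pthread_cond_wait(&b->done_cond, &b->lock);
   pthread_mutex_unlock(&b->lock);
}

/* Hand the current batch to the worker, then reuse the oldest slot. */
void glthread_flush_batch(struct glthread *g)
{
   g->last = g->next;
   g->next = (g->next + 1) % MARSHAL_BATCHES;
   fence_wait(&g->batches[g->next]);   /* slot must be idle before reuse */
   g->next_batch = &g->batches[g->next];
}
```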
|
/third_party/mesa3d/src/gallium/drivers/iris/ |
D | iris_perf.c |
     35  iris_emit_end_of_pipe_sync(&ice->batches[IRIS_BATCH_RENDER],  in iris_perf_emit_stall_at_pixel_scoreboard()
     47  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_perf_emit_mi_report_perf_count()
     55  _iris_batch_flush(&ice->batches[IRIS_BATCH_RENDER], __FILE__, __LINE__);  in iris_perf_batchbuffer_flush()
     64  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_perf_store_register_mem()
|
D | iris_pipe_control.c |
    311  struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_texture_barrier()
    312  struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];  in iris_texture_barrier()
    361  if (ice->batches[i].contains_draw) {  in iris_memory_barrier()
    362  iris_batch_maybe_flush(&ice->batches[i], 24);  in iris_memory_barrier()
    363  iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",  in iris_memory_barrier()
|
D | iris_fence.c |
    267  iris_batch_flush(&ice->batches[i]);  in iris_fence_flush()
    287  struct iris_batch *batch = &ice->batches[b];  in iris_fence_flush()
    340  struct iris_batch *batch = &ice->batches[b];  in iris_fence_await()
    408  if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))  in iris_fence_finish()
    409  iris_batch_flush(&ice->batches[i]);  in iris_fence_finish()
    603  ice->batches[b].contains_fence_signal = true;  in iris_fence_signal()
    604  iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,  in iris_fence_signal()
|
D | iris_context.c |
    103  iris_batch_check_for_reset(&ice->batches[i]);  in iris_get_device_reset_status()
    243  iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]);  in iris_destroy_context()
    244  iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]);  in iris_destroy_context()
    369  screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);  in iris_create_context()
    370  screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);  in iris_create_context()
|
D | iris_performance_query.c |
     79  ice->batches[IRIS_BATCH_RENDER].hw_ctx_id,  in iris_init_perf_query_info()
    196  intel_perf_wait_query(perf_ctx, obj, &ice->batches[IRIS_BATCH_RENDER]);  in iris_wait_perf_query()
    213  &ice->batches[IRIS_BATCH_RENDER]);  in iris_is_perf_query_ready()
    229  intel_perf_get_query_data(perf_ctx, obj, &ice->batches[IRIS_BATCH_RENDER],  in iris_get_perf_query_data()
|
D | iris_query.c |
    134  struct iris_batch *batch = &ice->batches[q->batch_idx];  in mark_available()
    172  struct iris_batch *batch = &ice->batches[q->batch_idx];  in write_value()
    197  iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,  in write_value()
    205  iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,  in write_value()
    248  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in write_overflow_values()
    557  struct iris_batch *batch = &ice->batches[q->batch_idx];  in iris_end_query()
    629  struct iris_batch *batch = &ice->batches[q->batch_idx];  in iris_get_query_result()
    662  struct iris_batch *batch = &ice->batches[q->batch_idx];  in iris_get_query_result_resource()
    771  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in set_predicate_for_result()
|
D | iris_border_color.c |
    118  if (iris_batch_references(&ice->batches[i], pool->bo))  in iris_border_color_pool_reserve()
    119  iris_batch_flush(&ice->batches[i]);  in iris_border_color_pool_reserve()
|
D | iris_draw.c |
    186  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_indirect_draw_vbo()
    239  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_simple_draw_vbo()
    269  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_draw_vbo()
    380  struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];  in iris_launch_grid()
|
D | iris_clear.c |
     43  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in iris_is_color_fast_clear_compatible()
    218  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in fast_clear_color()
    353  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in clear_color()
    466  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in fast_clear_depth()
    561  struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];  in clear_depth_stencil()
|
/third_party/mesa3d/src/gallium/drivers/panfrost/ |
D | pan_job.c |
     44  BITSET_FOREACH_SET(idx, ctx->batches.active, PAN_MAX_BATCHES)
     49  return batch - batch->ctx->batches.slots;  in panfrost_batch_idx()
     74  batch->seqnum = ++ctx->batches.seqnum;  in panfrost_batch_init()
    149  BITSET_CLEAR(ctx->batches.active, batch_idx);  in panfrost_batch_cleanup()
    164  if (ctx->batches.slots[i].seqnum &&  in panfrost_get_batch()
    165  util_framebuffer_state_equal(&ctx->batches.slots[i].key, key)) {  in panfrost_get_batch()
    169  ctx->batches.slots[i].seqnum = ++ctx->batches.seqnum;  in panfrost_get_batch()
    170  return &ctx->batches.slots[i];  in panfrost_get_batch()
    173  if (!batch || batch->seqnum > ctx->batches.slots[i].seqnum)  in panfrost_get_batch()
    174  batch = &ctx->batches.slots[i];  in panfrost_get_batch()
    [all …]
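panfrost_get_batch() (lines 164-174) reuses a fixed pool of batch slots: return a slot whose framebuffer key matches, bumping its seqnum, otherwise fall back to the slot with the smallest seqnum, i.e. the least recently used. A compressed rendition with the key reduced to an int and the pool size assumed:

```c
#include <stddef.h>

#define MAX_BATCHES 8   /* stand-in for PAN_MAX_BATCHES */

struct batch {
   unsigned long seqnum;   /* 0 => slot unused; larger => more recent */
   int key;                /* stand-in for the framebuffer-state key */
};

struct ctx {
   struct batch slots[MAX_BATCHES];
   unsigned long seqnum;
};

struct batch *get_batch(struct ctx *ctx, int key)
{
   struct batch *victim = NULL;

   for (int i = 0; i < MAX_BATCHES; i++) {
      if (ctx->slots[i].seqnum && ctx->slots[i].key == key) {
         ctx->slots[i].seqnum = ++ctx->seqnum;   /* touch on reuse */
         return &ctx->slots[i];
      }
      if (!victim || victim->seqnum > ctx->slots[i].seqnum)
         victim = &ctx->slots[i];
   }

   /* No match: recycle the LRU slot (the driver flushes it first). */
   victim->key = key;
   victim->seqnum = ++ctx->seqnum;
   return victim;
}
```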
|
/third_party/skia/third_party/externals/dawn/src/dawn_native/ |
D | IndirectDrawValidationEncoder.cpp |
    213  std::vector<Batch> batches;  in EncodeIndirectDrawValidationCommands() member
    268  currentPass->batches.push_back(newBatch);  in EncodeIndirectDrawValidationCommands()
    279  newPass.batches.push_back(newBatch);  in EncodeIndirectDrawValidationCommands()
    304  for (Batch& batch : pass.batches) {  in EncodeIndirectDrawValidationCommands()
    363  for (const Batch& batch : pass.batches) {  in EncodeIndirectDrawValidationCommands()
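The Dawn encoder groups validation batches into passes and later walks each pass's batch list twice (lines 304 and 363). Dawn does this with std::vector in C++; a bare C equivalent of the push_back step, with a hypothetical batch record, might look like:

```c
#include <stdlib.h>

struct batch_rec { unsigned draw_count; };

struct pass {
   struct batch_rec *batches;
   size_t count, cap;
};

/* Append a batch to a pass, growing the array as needed.
 * Returns 0 on success, -1 on allocation failure. */
static int pass_push_batch(struct pass *p, struct batch_rec b)
{
   if (p->count == p->cap) {
      size_t ncap = p->cap ? p->cap * 2 : 4;
      struct batch_rec *nb = realloc(p->batches, ncap * sizeof(*nb));
      if (!nb)
         return -1;
      p->batches = nb;
      p->cap = ncap;
   }
   p->batches[p->count++] = b;
   return 0;
}
```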
|
/third_party/mindspore/mindspore/lite/src/delegate/npu/ |
D | transpose_kernel.cc |
     23  void PackNHWCToNCHWFp32(const void *src, void *dst, int batches, int plane, int channel) {  in PackNHWCToNCHWFp32() argument
     26  for (int n = 0; n < batches; n++) {  in PackNHWCToNCHWFp32()
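PackNHWCToNCHWFp32()'s full signature appears in transpose_kernel.h below; only its outer batch loop is visible here. A plain reference implementation of such an NHWC-to-NCHW repack (not MindSpore's tuned version) is:

```c
void pack_nhwc_to_nchw_fp32(const void *src, void *dst,
                            int batches, int plane, int channel)
{
   const float *s = (const float *)src;
   float *d = (float *)dst;

   for (int n = 0; n < batches; n++) {
      for (int p = 0; p < plane; p++) {        /* plane = H * W */
         for (int c = 0; c < channel; c++) {
            /* NHWC index: ((n * plane) + p) * channel + c
             * NCHW index: ((n * channel) + c) * plane + p */
            d[(n * channel + c) * plane + p] =
               s[(n * plane + p) * channel + c];
         }
      }
   }
}
```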
|
D | transpose_kernel.h |
     30  void PackNHWCToNCHWFp32(const void *src, void *dst, int batches, int plane, int channel);
|
/third_party/typescript/src/testRunner/parallel/ |
D | host.ts |
    364  …const batches: { runner: TestRunnerKind | "unittest", file: string, size: number }[][] = new Array…  constant
    376  if (!batches[i]) {
    377  batches[i] = [];
    379  const total = batches[i].reduce((p, c) => p + c.size, 0);
    385  batches[i].push(task);
    403  const payload = batches.pop();
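host.ts spreads test files across a fixed number of worker batches while tracking each batch's accumulated size (line 379). The C sketch below shows one balancing policy, assigning each task to the currently lightest batch; the real runner's budgeting logic differs in detail, and assign() is a hypothetical callback:

```c
#include <stddef.h>

struct task { const char *file; size_t size; };
struct batch_slot { size_t total; /* plus a task list in practice */ };

/* Greedy size balancing: give each task to the lightest batch so far. */
void partition(const struct task *tasks, size_t ntasks,
               struct batch_slot *batches, size_t nbatches,
               void (*assign)(const struct task *, size_t batch_idx))
{
   for (size_t t = 0; t < ntasks; t++) {
      size_t best = 0;
      for (size_t b = 1; b < nbatches; b++) {
         if (batches[b].total < batches[best].total)
            best = b;
      }
      batches[best].total += tasks[t].size;
      assign(&tasks[t], best);
   }
}
```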
|
/third_party/mesa3d/src/virtio/vulkan/ |
D | vn_renderer.h |
    104  const struct vn_renderer_submit_batch *batches;  member
    272  .batches =  in vn_renderer_submit_simple()
    480  .batches =  in vn_renderer_submit_simple_sync()
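vn_renderer.h declares the submit descriptor's batches pointer (line 104), and the *_simple helpers fill it inline with a single batch (lines 272 and 480). A reduced model of that one-element compound-literal idiom, with invented field names:

```c
#include <stddef.h>

struct submit_batch { const void *cmds; size_t cmd_size; };

struct submit {
   const struct submit_batch *batches;
   size_t batch_count;
};

static void do_submit(const struct submit *s)
{ (void)s; /* encode and hand off to the renderer in the real driver */ }

/* One-batch convenience wrapper, mirroring vn_renderer_submit_simple(). */
static void submit_simple(const void *cmds, size_t size)
{
   const struct submit s = {
      .batches = &(const struct submit_batch){
         .cmds = cmds,
         .cmd_size = size,
      },
      .batch_count = 1,
   };
   do_submit(&s);
}
```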
|