| /third_party/mesa3d/src/gallium/drivers/freedreno/ |
| D | freedreno_batch.c |
 * SPDX-License-Identifier: MIT

alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
   struct fd_context *ctx = batch->ctx;

   /* ... have no option but to allocate large worst-case sizes so that ... */
   if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
       ...)
      ...

   return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}

subpass_create(struct fd_batch *batch)
{
   ...
   subpass->draw = alloc_ring(batch, 0x100000, 0);

   /* Replace batch->draw with reference to current subpass, for ... */
   if (batch->draw)
[all …]
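The fragments suggest alloc_ring() sizes ringbuffers by kernel capability: without FD_VERSION_UNLIMITED_CMDS the cmdstream cannot grow after allocation, so the driver falls back to a large worst-case size. A minimal sketch of that pattern against freedreno internals; the growable-flag handling and fallback size are assumptions, not the actual Mesa policy:

```c
/* Sketch: pick a ring allocation strategy based on kernel features.
 * fd_device_version() and fd_submit_new_ringbuffer() are real freedreno
 * winsys entry points; the sizing policy shown is illustrative. */
static struct fd_ringbuffer *
alloc_ring_sketch(struct fd_batch *batch, unsigned sz,
                  enum fd_ringbuffer_flags flags)
{
   struct fd_context *ctx = batch->ctx;

   if (fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
      flags |= FD_RINGBUFFER_GROWABLE;   /* assumed flag usage */
   else
      sz = 0x100000;                     /* illustrative worst case */

   return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}
```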
|
| D | freedreno_draw.c |
 * SPDX-License-Identifier: MIT

batch_references_resource(struct fd_batch *batch, struct pipe_resource *prsc)
{
   return fd_batch_references_resource(batch, fd_resource(prsc));
}

resource_read(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt
{
   fd_batch_resource_read(batch, fd_resource(prsc));
}

resource_written(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt
{
   fd_batch_resource_write(batch, fd_resource(prsc));
}

batch_draw_tracking_for_dirty_bits(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
[all …]
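These helpers record which resources a draw reads and writes so cross-batch dependencies can be resolved at flush time. A sketch of draw-time tracking using the helpers above; the set of resources walked is illustrative (the real batch_draw_tracking_for_dirty_bits() keys off ctx->dirty bits), and the ctx->vtx.vertexbuf layout is assumed from freedreno's context state:

```c
/* Sketch: render targets are written by a draw, vertex buffers are
 * only read. Simplified; no dirty-bit filtering. */
static void
draw_tracking_sketch(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
      if (pfb->cbufs[i])
         resource_written(batch, pfb->cbufs[i]->texture);
   }
   if (pfb->zsbuf)
      resource_written(batch, pfb->zsbuf->texture);

   for (unsigned i = 0; i < ctx->vtx.vertexbuf.count; i++) {
      struct pipe_vertex_buffer *vb = &ctx->vtx.vertexbuf.vb[i];
      if (vb->buffer.resource)
         resource_read(batch, vb->buffer.resource);
   }
}
```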
|
| D | freedreno_batch_cache.c |
 * SPDX-License-Identifier: MIT

/*
 * The batch cache provides lookup for mapping pipe_framebuffer_state
 * to a batch.
 *
 * Batch Cache hashtable key:
 * ...
 *
 * Batch:
 *
 * Each batch needs to hold a reference to each resource it depends on (ie.
 * ...
 *
 * When a resource is destroyed, we need to remove entries in the batch
 * cache ...
 *
 * When a batch has weak reference to no more resources (ie. all the
 * surfaces it rendered to are destroyed) the batch can be destroyed.
 * ... surfaces are destroyed before the batch is submitted.
 */
[all …]
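The cache keys batches on framebuffer state so a render pass resumed with the same targets reuses its batch. A sketch of the kind of key and hash involved; the real fd_batch_key also carries per-surface entries, so this trimmed struct is illustrative:

```c
/* Sketch: a framebuffer-derived batch-cache key, hashed bytewise.
 * _mesa_hash_data() is Mesa's util hash over raw bytes. */
#include <stdint.h>
#include "util/hash_table.h"

struct batch_key_sketch {
   uint16_t width, height, layers;
   uint16_t samples;
   uint16_t num_surfs;
   uint32_t ctx_seqno;
   /* per-surface entries (texture seqno, format, level/layer) follow */
};

static uint32_t
batch_key_hash_sketch(const struct batch_key_sketch *key)
{
   return _mesa_hash_data(key, sizeof(*key));
}
```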
|
| D | freedreno_query_hw.c |
 * SPDX-License-Identifier: MIT

get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring, ...)
{
   struct fd_context *ctx = batch->ctx;
   ...
   if (!batch->sample_cache[idx]) {
      struct fd_hw_sample *new_samp =
         ctx->hw_sample_providers[idx]->get_sample(batch, ring);
      fd_hw_sample_reference(ctx, &batch->sample_cache[idx], new_samp);
      util_dynarray_append(&batch->samples, struct fd_hw_sample *, new_samp);
      fd_batch_needs_flush(batch);
   }

   fd_hw_sample_reference(ctx, &samp, batch->sample_cache[idx]);
   ...
}

clear_sample_cache(struct fd_batch *batch)
[all …]
|
| D | freedreno_batch.h |
 * SPDX-License-Identifier: MIT

/*
 * A subpass is a fragment of a batch potentially starting with a clear.
 * If the app does a mid-batch clear, that clear and subsequent draws
 * can be split out into another sub-pass. At gmem time, the appropriate
 * ...
 */

/*
 * A batch tracks everything about a cmdstream batch/submit, including the
 * ... fd_resource-s, etc.
 */
struct fd_batch {
   ...
   unsigned idx; /* index into cache->batches[] */
   ...
   /* update seqno of most recent draw/etc to the batch. */
   ...
   /* is this a non-draw batch (ie compute/blit which has no pfb state)? */
   ...
   bool tessellation : 1; /* tessellation used in batch */
[all …]
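The subpass comment explains the motivation: a mid-batch clear would otherwise force a flush, but splitting it into a new subpass lets gmem code still treat it as a per-tile fast clear. A sketch of that decision, assuming the num_draws field and subpass_create() from the excerpts; record_fast_clear() is a hypothetical helper:

```c
/* Sketch: fold a clear into the current subpass if nothing has been
 * drawn yet, otherwise open a new subpass for it. Illustrative only. */
static void
handle_clear_sketch(struct fd_batch *batch, unsigned buffers)
{
   if (batch->num_draws == 0) {
      /* Clear at the start of the (sub)pass: per-tile fast clear. */
      record_fast_clear(batch, buffers);   /* hypothetical */
   } else {
      /* Mid-batch clear: start a fresh subpass so the clear still
       * begins a pass instead of forcing a flush. */
      batch->subpass = subpass_create(batch);
      record_fast_clear(batch, buffers);   /* hypothetical */
   }
}
```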
|
| D | freedreno_gmem.c |
 * SPDX-License-Identifier: MIT

/*
 * ... to restore the previous tile's contents (system mem -> GMEM), and after all
 * ... contents (GMEM -> system mem).
 *
 *   +--<---<-- IB ---<---+---<---+---<---<---<--+
 *   [per-tile cmdstream diagram, partially elided in the snippet]
 *   ------------------------------------------------------
 *
 * Where the per-tile section handles scissor setup, mem2gmem restore (if
 * ...
 */

dump_gmem_key(...)
{
   printf("{ .minx=%u, .miny=%u, .width=%u, .height=%u", key->minx, key->miny,
          key->width, key->height);
   printf(", .gmem_page_align=%u, .nr_cbufs=%u", key->gmem_page_align,
          key->nr_cbufs);
[all …]
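The comment describes tiled rendering: the captured draw cmdstream is replayed once per tile, bracketed by restore (mem2gmem) and resolve (gmem2mem) steps. A sketch of that loop; the emit_tile_* hooks are per-generation fd_context entry points, but the body is simplified (no binning or LRZ handling) and should not be read as the actual driver flow:

```c
/* Sketch: per-tile replay of the batch's draw IB, as in the diagram. */
static void
render_tiles_sketch(struct fd_batch *batch, struct fd_gmem_stateobj *gmem)
{
   struct fd_context *ctx = batch->ctx;

   for (unsigned i = 0; i < gmem->nbins_x * gmem->nbins_y; i++) {
      struct fd_tile *tile = &gmem->tile[i];

      ctx->emit_tile_prep(batch, tile);       /* scissor/bin setup */
      ctx->emit_tile_mem2gmem(batch, tile);   /* restore, if needed */

      /* Replay the captured draw cmdstream (the IB in the diagram). */
      fd_ringbuffer_emit_reloc_ring_full(batch->gmem, batch->draw, 0);

      ctx->emit_tile_gmem2mem(batch, tile);   /* resolve to sysmem */
   }
}
```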
|
| /third_party/mesa3d/src/gallium/drivers/iris/ |
| D | iris_batch.c |
/*
 * ... collect into a "batch buffer". Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 * ...
 * virtual memory address before executing our batch. If a BO is not in ...
 */

static void iris_batch_reset(struct iris_batch *batch);

static unsigned
iris_batch_num_fences(struct iris_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences,
                                     struct iris_batch_fence);
}

static void
iris_dump_fence_list(struct iris_batch *batch)
{
   fprintf(stderr, "Fence list (length %u): ", iris_batch_num_fences(batch));

   util_dynarray_foreach(&batch->exec_fences, struct iris_batch_fence, f) {
      fprintf(stderr, ...,
              (f->flags & IRIS_BATCH_FENCE_WAIT) ? "..." : "",
              ...);
[all …]
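The truncated intro comment is about residency: every BO a batch references must be handed to the kernel so it has a virtual memory address before execution. A self-contained sketch of the per-batch BO tracking pattern behind that (the struct and helper are simplified stand-ins, not iris' actual data layout):

```c
/* Sketch: dedupe BO references per batch, upgrading read to write,
 * so the kernel's exec/validation list sees each BO exactly once. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_REFS 1024

struct bo_ref {
   uint32_t handle;   /* GEM handle */
   bool written;      /* strongest access seen so far */
};

struct batch_refs {
   struct bo_ref refs[MAX_REFS];
   unsigned count;
};

static void
batch_use_bo(struct batch_refs *b, uint32_t handle, bool writable)
{
   for (unsigned i = 0; i < b->count; i++) {
      if (b->refs[i].handle == handle) {
         b->refs[i].written |= writable;
         return;
      }
   }
   assert(b->count < MAX_REFS);
   b->refs[b->count++] = (struct bo_ref){ .handle = handle,
                                          .written = writable };
}
```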
|
| D | iris_batch.h |
/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END or 12
 * ... */

/* Our target batch size - flush approximately at this point. */
#define BATCH_SZ (128 * 1024 - BATCH_RESERVED)

struct iris_batch {
   ...
   /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
   ...
   /** Size of the primary batch being submitted to execbuf (in bytes). */
   ...
   /** A list of all BOs referenced by this batch */
   ...
   /** Bitset of whether this batch writes to BO `i'. */
   ...
   /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first ... */

   /**
    * A list of iris_syncobjs associated with this batch.
    *
    * The first list entry will always be a signalling sync-point, indicating
[all …]
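BATCH_SZ subtracts BATCH_RESERVED so the terminator always fits: the driver flushes before touching the reserved tail, guaranteeing room for MI_BATCH_BUFFER_END (or the longer workaround sequence the truncated comment mentions). A small self-contained sketch of that invariant; the reserve value here is illustrative, iris derives the real one from the terminator encoding:

```c
/* Sketch: flush-threshold check that keeps the tail reserve intact. */
#include <stdbool.h>

#define BATCH_TOTAL_SKETCH    (128 * 1024)
#define BATCH_RESERVED_SKETCH 16
#define BATCH_SZ_SKETCH       (BATCH_TOTAL_SKETCH - BATCH_RESERVED_SKETCH)

static bool
batch_has_room_sketch(unsigned used_bytes, unsigned estimate_bytes)
{
   /* Flush before the reserve is touched, so MI_BATCH_BUFFER_END can
    * always be emitted without growing or wrapping the buffer. */
   return used_bytes + estimate_bytes <= BATCH_SZ_SKETCH;
}
```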
|
| D | iris_fine_fence.c |
static void
iris_fine_fence_reset(struct iris_batch *batch)
{
   u_upload_alloc(batch->fine_fences.uploader, ...,
                  &batch->fine_fences.ref.offset, &batch->fine_fences.ref.res,
                  (void **)&batch->fine_fences.map);
   WRITE_ONCE(*batch->fine_fences.map, 0);
   batch->fine_fences.next++;
}

void
iris_fine_fence_init(struct iris_batch *batch)
{
   batch->fine_fences.ref.res = NULL;
   batch->fine_fences.next = 0;
   iris_fine_fence_reset(batch);
}
[all …]
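Fine fences work by having the GPU write an increasing seqno into the mapped slot that reset() zeroes above; the CPU then compares a snapshot of that slot against a fence's seqno. A self-contained sketch of the signal check, illustrative rather than iris' exact code:

```c
/* Sketch: a fence has passed once the GPU-written seqno reaches it. */
#include <stdbool.h>
#include <stdint.h>

struct fine_fence_sketch {
   const volatile uint32_t *map; /* CPU view of the GPU-written slot */
   uint32_t seqno;               /* value the GPU writes at the fence */
};

static bool
fine_fence_signaled(const struct fine_fence_sketch *ff)
{
   /* The slot starts at 0 each batch; any readback >= seqno means
    * all work before the fence has completed. */
   return *ff->map >= ff->seqno;
}
```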
|
| /third_party/mesa3d/src/gallium/drivers/crocus/ |
| D | crocus_batch.c |
/*
 * ... collect into a "batch buffer". Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 * ...
 * virtual memory address before executing our batch. If a BO is not in ...
 */

#include "drm-uapi/i915_drm.h"

/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * ... */
#define BATCH_RESERVED(devinfo) \
   ((devinfo)->platform == INTEL_PLATFORM_HSW ? 32 : 16)

static void crocus_batch_reset(struct crocus_batch *batch);

static unsigned
num_fences(struct crocus_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences, ...);
}

static void
dump_fence_list(struct crocus_batch *batch)
[all …]
|
| D | crocus_blorp.c |
/*
 * ... on the 3D engine). This provides the driver-specific hooks needed to
 * ...
 */

stream_state(struct crocus_batch *batch, ...)
{
   ...
   uint32_t offset = ALIGN(batch->state.used, alignment);

   if (offset + size >= STATE_SZ && !batch->no_wrap) {
      crocus_batch_flush(batch);
      offset = ALIGN(batch->state.used, alignment);
   } else if (offset + size >= batch->state.bo->size) {
      const unsigned new_size =
         MIN2(batch->state.bo->size + batch->state.bo->size / 2,
              ...);
      crocus_grow_buffer(batch, true, batch->state.used, new_size);
      assert(offset + size < batch->state.bo->size);
   }
[all …]
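stream_state() shows the flush-or-grow policy: past the soft limit the batch is flushed (wrapping the buffer), otherwise the state buffer grows by half its size, capped at a maximum. A self-contained sketch of the bookkeeping side of that policy, with illustrative constants (the real code also reallocates the BO, which this omits):

```c
/* Sketch: aligned sub-allocation with 1.5x capped growth, so repeated
 * reservations stay amortized O(1). alignment must be a power of two. */
#include <assert.h>
#include <stdint.h>

#define MAX_STATE_SKETCH (16 * 1024 * 1024)
#define MIN2(a, b) ((a) < (b) ? (a) : (b))

static uint32_t
reserve_state_sketch(uint32_t *used, uint32_t *bo_size,
                     uint32_t size, uint32_t alignment)
{
   uint32_t offset = (*used + alignment - 1) & ~(alignment - 1);

   if (offset + size >= *bo_size) {
      *bo_size = MIN2(*bo_size + *bo_size / 2, MAX_STATE_SKETCH);
      assert(offset + size < *bo_size);
   }

   *used = offset + size;
   return offset;
}
```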
|
| D | crocus_batch.h |
#include "drm-uapi/i915_drm.h"

/* Our target batch size - flush approximately at this point. */

struct crocus_batch {
   ...
   /** What batch is this? (e.g. CROCUS_BATCH_RENDER/COMPUTE) */
   ...
   /** Size of the primary batch if we've moved on to a secondary. */
   ...
   /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first ... */

   /**
    * A list of crocus_syncobjs associated with this batch.
    *
    * The first list entry will always be a signalling sync-point, indicating
    * that this batch has completed. The others are likely to be sync-points
    * to wait on before executing the batch.
    */
   ...
   /** A seqno (and syncobj) for the last batch that was submitted. */
[all …]
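The syncobj comment implies a fixed layout: entry 0 signals this batch's completion, later entries are waits. A sketch of turning such a list into the execbuf fence array; the drm-uapi types and flags are real, the loop is illustrative:

```c
/* Sketch: build an i915 exec-fence array from the batch's syncobjs,
 * following the layout the comment describes. */
#include <stdint.h>
#include "drm-uapi/i915_drm.h"

static void
build_exec_fences_sketch(struct drm_i915_gem_exec_fence *fences,
                         const uint32_t *syncobj_handles, unsigned count)
{
   for (unsigned i = 0; i < count; i++) {
      fences[i].handle = syncobj_handles[i];
      /* First entry: signalled when this batch completes.
       * Remaining entries: must signal before this batch runs. */
      fences[i].flags = (i == 0) ? I915_EXEC_FENCE_SIGNAL
                                 : I915_EXEC_FENCE_WAIT;
   }
}
```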
|
| D | crocus_fine_fence.c |
static void
crocus_fine_fence_reset(struct crocus_batch *batch)
{
   u_upload_alloc(batch->fine_fences.uploader, ...,
                  &batch->fine_fences.ref.offset, &batch->fine_fences.ref.res,
                  (void **)&batch->fine_fences.map);
   WRITE_ONCE(*batch->fine_fences.map, 0);
   batch->fine_fences.next++;
}

void
crocus_fine_fence_init(struct crocus_batch *batch)
{
   batch->fine_fences.ref.res = NULL;
   batch->fine_fences.next = 0;
   if (batch_has_fine_fence(batch))
      crocus_fine_fence_reset(batch);
}
[all …]
|
| /third_party/mesa3d/src/gallium/drivers/panfrost/ |
| D | pan_job.c |
 * Copyright (C) 2019-2020 Collabora, Ltd.
 * Copyright (C) 2014-2017 Broadcom

   ... BITSET_FOREACH_SET(idx, ctx->batches.active, PAN_MAX_BATCHES)

static unsigned
panfrost_batch_idx(struct panfrost_batch *batch)
{
   return batch - batch->ctx->batches.slots;
}

/* Adds the BO backing surface to a batch if the surface is non-null */
static void
panfrost_batch_add_surface(struct panfrost_batch *batch, ...)
{
   ...
   struct panfrost_resource *rsrc = pan_resource(surf->texture);
   pan_legalize_format(batch->ctx, rsrc, surf->format, true, false);
   panfrost_batch_write_rsrc(batch, rsrc, PIPE_SHADER_FRAGMENT);
}
[all …]
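panfrost_batch_idx() recovers a batch's slot by pointer subtraction, which only works because batches live in a fixed array with an "active" bitset alongside. A self-contained sketch of that allocation scheme, with simplified types and an illustrative slot count:

```c
/* Sketch: fixed-slot batch pool; index = pointer difference into slots[]. */
#include <stdint.h>
#include <string.h>

#define MAX_BATCHES_SKETCH 32

struct batch_sketch { unsigned seqnum; /* ... */ };

struct batches_sketch {
   uint32_t active;                               /* bit i => slot i in use */
   struct batch_sketch slots[MAX_BATCHES_SKETCH];
};

static unsigned
batch_idx_sketch(const struct batches_sketch *b,
                 const struct batch_sketch *batch)
{
   /* Valid because every batch lives inside slots[]. */
   return batch - b->slots;
}

static struct batch_sketch *
batch_alloc_sketch(struct batches_sketch *b)
{
   for (unsigned i = 0; i < MAX_BATCHES_SKETCH; i++) {
      if (!(b->active & (1u << i))) {
         b->active |= 1u << i;
         memset(&b->slots[i], 0, sizeof(b->slots[i]));
         return &b->slots[i];
      }
   }
   return NULL; /* caller must flush the oldest batch */
}
```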
|
| D | pan_jm.c |
#include "drm-uapi/panfrost_drm.h"

GENX(jm_init_batch)(struct panfrost_batch *batch)
{
   batch->framebuffer =
      ...
      pan_pool_alloc_desc(&batch->pool.base, FRAMEBUFFER);
      ...
      pan_pool_alloc_desc_aggregate(
         &batch->pool.base, PAN_DESC(FRAMEBUFFER), PAN_DESC(ZS_CRC_EXTENSION),
         PAN_DESC_ARRAY(MAX2(batch->key.nr_cbufs, 1), RENDER_TARGET));

   if (!batch->framebuffer.gpu)
      return -1;
   ...
   batch->tls = pan_pool_alloc_desc(&batch->pool.base, LOCAL_STORAGE);
   ...
   batch->tls = batch->framebuffer;
[all …]
|
| D | pan_csf.c |
#include "drm-uapi/panthor_drm.h"

csf_alloc_cs_buffer(...)
{
   assert(cookie && "Self-contained queues can't be extended.");

   struct panfrost_batch *batch = cookie;
   ...
   pan_pool_alloc_aligned(&batch->csf.cs_chunk_pool.base, capacity * 8, 64);
   ...
}

/*
 * Register ... is reserved to pass the batch tiler OOM context
 */

csf_update_tiler_oom_ctx(...)
{
   ...
   if (likely(!b->conf.reg_perm)) {
      ...
   }

   orig_cb = b->conf.reg_perm;
   b->conf.reg_perm = NULL;
   ...
   b->conf.reg_perm = orig_cb;
}

csf_oom_handler_init(...)
{
   struct panfrost_device *dev = pan_device(ctx->base.screen);
[all …]
|
| /third_party/mesa3d/src/gallium/drivers/d3d12/ |
| D | d3d12_batch.cpp |
d3d12_sampler_desc_table_key_hash(...)
{
   return _mesa_hash_data(table->descs, sizeof(table->descs[0]) * table->count);
}

d3d12_sampler_desc_table_key_equals(...)
{
   return table_a->count == table_b->count &&
          memcmp(table_a->descs, table_b->descs,
                 sizeof(table_a->descs[0]) * table_a->count) == 0;
}

d3d12_init_batch(struct d3d12_context *ctx, struct d3d12_batch *batch)
{
   struct d3d12_screen *screen = d3d12_screen(ctx->base.screen);

   batch->bos = _mesa_hash_table_create(NULL, _mesa_hash_pointer, ...);
   util_dynarray_init(&batch->local_bos, NULL);
   batch->surfaces = _mesa_set_create(NULL, _mesa_hash_pointer, ...);
   batch->objects = _mesa_set_create(NULL, ...);

   if (!batch->bos || !batch->surfaces || !batch->objects)
      ...

   if (screen->max_feature_level >= D3D_FEATURE_LEVEL_11_0) {
[all …]
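The two callbacks implement content keying for a variable-length table: hash over count * element bytes, then compare counts before memcmp. A sketch of that pattern with Mesa's util hash-table API; the key struct is a simplified stand-in:

```c
/* Sketch: hash/equals callback pair for a variable-length key. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "util/hash_table.h"

struct desc_table_key {
   unsigned count;
   uint64_t descs[16];
};

static uint32_t
key_hash(const void *data)
{
   const struct desc_table_key *k = data;
   return _mesa_hash_data(k->descs, sizeof(k->descs[0]) * k->count);
}

static bool
key_equals(const void *a, const void *b)
{
   const struct desc_table_key *ka = a, *kb = b;
   return ka->count == kb->count &&
          memcmp(ka->descs, kb->descs,
                 sizeof(ka->descs[0]) * ka->count) == 0;
}

/* Usage: _mesa_hash_table_create(NULL, key_hash, key_equals); */
```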
|
| /third_party/mesa3d/src/gallium/drivers/asahi/ |
| D | agx_batch.c |
 * Copyright 2019-2020 Collabora, Ltd.
 * SPDX-License-Identifier: MIT

   ... BITSET_FOREACH_SET(idx, ctx->batches.active, AGX_MAX_BATCHES)
   ... BITSET_FOREACH_SET(idx, ctx->batches.submitted, AGX_MAX_BATCHES)

#define batch_debug(batch, fmt, ...) \
   do { \
      if (unlikely(agx_device(batch->ctx->base.screen)->debug & \
                   ...)) \
         agx_msg("[Queue %u Batch %u] " fmt "\n", batch->ctx->queue_id, \
                 agx_batch_idx(batch), ##__VA_ARGS__); \
   } while (0)

bool
agx_batch_is_active(struct agx_batch *batch)
{
   return BITSET_TEST(batch->ctx->batches.active, agx_batch_idx(batch));
}
[all …]
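asahi keeps two bitsets, "active" (still recording) and "submitted" (in flight), so iterating batches in either state is a bitset walk rather than a list traversal. A sketch of that lifecycle tracking with Mesa's util bitset macros; the slot count is illustrative:

```c
/* Sketch: batch lifecycle via util bitsets, as in the excerpt. */
#include "util/bitset.h"

#define MAX_BATCHES_SKETCH 128

struct batches_state_sketch {
   BITSET_DECLARE(active, MAX_BATCHES_SKETCH);
   BITSET_DECLARE(submitted, MAX_BATCHES_SKETCH);
};

static void
mark_submitted_sketch(struct batches_state_sketch *s, unsigned idx)
{
   /* A batch leaves "active" (recording) when submitted, and stays
    * in "submitted" until the kernel signals completion. */
   BITSET_CLEAR(s->active, idx);
   BITSET_SET(s->submitted, idx);
}

static unsigned
count_in_flight_sketch(const struct batches_state_sketch *s)
{
   unsigned count = 0, idx;
   BITSET_FOREACH_SET(idx, s->submitted, MAX_BATCHES_SKETCH)
      count++;
   return count;
}
```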
|
| D | agx_uniforms.c |
 * SPDX-License-Identifier: MIT

static uint64_t
agx_const_buffer_ptr(struct agx_batch *batch, struct pipe_constant_buffer *cb)
{
   if (cb->buffer) {
      struct agx_resource *rsrc = agx_resource(cb->buffer);
      agx_batch_reads(batch, rsrc);

      return rsrc->bo->va->addr + cb->buffer_offset;
   }
   ...
}

agx_upload_vbos(struct agx_batch *batch)
{
   struct agx_context *ctx = batch->ctx;
   struct agx_vertex_elements *attribs = ctx->attributes;
   struct agx_device *dev = agx_device(ctx->base.screen);
[all …]
|
| /third_party/mesa3d/src/gallium/winsys/i915/drm/ |
| D | i915_drm_batchbuffer.c |
#include "drm-uapi/i915_drm.h"

static inline struct i915_drm_batchbuffer *
i915_drm_batchbuffer(struct i915_winsys_batchbuffer *batch)
{
   return (struct i915_drm_batchbuffer *)batch;
}

static void
i915_drm_batchbuffer_reset(struct i915_drm_batchbuffer *batch)
{
   struct i915_drm_winsys *idws = i915_drm_winsys(batch->base.iws);

   if (batch->bo)
      drm_intel_bo_unreference(batch->bo);
   batch->bo = drm_intel_bo_alloc(idws->gem_manager,
                                  ...,
                                  batch->actual_size,
                                  ...);

   memset(batch->base.map, 0, batch->actual_size);
[all …]
|
| /third_party/mesa3d/src/panfrost/vulkan/jm/ |
| D | panvk_vX_cmd_buffer.c |
panvk_cmd_prepare_fragment_job(...)
{
   const struct pan_fb_info *fbinfo = &cmdbuf->state.gfx.render.fb.info;
   struct panvk_batch *batch = cmdbuf->cur_batch;
   ...
   pan_jc_add_job(&batch->frag_jc, MALI_JOB_TYPE_FRAGMENT, false, false, 0, 0,
                  ...);
   util_dynarray_append(&batch->jobs, void *, job_ptr.cpu);
}

panvk_per_arch(...)
{
   struct panvk_batch *batch = cmdbuf->cur_batch;

   if (!batch)
      ...

   struct pan_fb_info *fbinfo = &cmdbuf->state.gfx.render.fb.info;

   assert(batch);

   if (!batch->fb.desc.gpu && !batch->vtc_jc.first_job) {
      if (util_dynarray_num_elements(&batch->event_ops,
[all …]
|
| /third_party/mesa3d/src/gallium/drivers/freedreno/a6xx/ |
| D | fd6_gmem.cc |
 * SPDX-License-Identifier: MIT

fd6_emit_flag_reference(...)
{
   ...
   OUT_RELOC(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0, ...);
   ... fdl_ubwc_pitch(&rsc->layout, level)) |
   ... rsc->layout.ubwc_layer_size >> 2));
   ...
}

emit_mrt(...)
{
   ...
   for (i = 0; i < pfb->nr_cbufs; i++) {
      ...
      if (!pfb->cbufs[i])
         continue;

      struct pipe_surface *psurf = pfb->cbufs[i];
      enum pipe_format pformat = psurf->format;
      rsc = fd_resource(psurf->texture);
      ...
      uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
[all …]
|
| /third_party/mesa3d/src/intel/blorp/ |
| D | blorp_genX_exec_elk.h |
blorp_emit_dwords(struct blorp_batch *batch, unsigned n);

blorp_emit_reloc(struct blorp_batch *batch, ...);

blorp_measure_start(struct blorp_batch *batch, ...);

blorp_measure_end(struct blorp_batch *batch, ...);

blorp_alloc_dynamic_state(struct blorp_batch *batch, ...);

blorp_alloc_general_state(struct blorp_batch *batch, ...);

blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size, ...);

blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch, ...);

blorp_get_workaround_address(struct blorp_batch *batch);

blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries, ...);
[all …]
|
| D | blorp_genX_exec_brw.h |
blorp_emit_dwords(struct blorp_batch *batch, unsigned n);

blorp_emit_reloc(struct blorp_batch *batch, ...);

blorp_measure_start(struct blorp_batch *batch, ...);

blorp_measure_end(struct blorp_batch *batch, ...);

blorp_alloc_dynamic_state(struct blorp_batch *batch, ...);

blorp_alloc_general_state(struct blorp_batch *batch, ...);

blorp_get_dynamic_state(struct blorp_batch *batch, ...);

blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size, ...);

blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch, ...);

blorp_get_workaround_address(struct blorp_batch *batch);
[all …]
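Both exec headers declare hooks that each driver must supply so blorp's generated code can emit into the driver's batch. As an example, a driver-side blorp_emit_dwords() typically reserves space and returns the map pointer for blorp to write into. The sketch below is modeled on the iris version (iris_require_command_space() and driver_batch are real names), but treat the body as illustrative:

```c
/* Sketch: pointer-bump implementation of the blorp_emit_dwords() hook. */
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   /* Make sure n dwords fit before the reserved batch tail. */
   iris_require_command_space(batch, n * sizeof(uint32_t));

   uint32_t *map = batch->map_next;
   batch->map_next = map + n;
   return map;
}
```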
|
| /third_party/mesa3d/src/gallium/drivers/i915/ |
| D | i915_batchbuffer.h |
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.

static inline size_t
i915_winsys_batchbuffer_space(struct i915_winsys_batchbuffer *batch)
{
   return batch->size - (batch->ptr - batch->map);
}

static inline bool
i915_winsys_batchbuffer_check(struct i915_winsys_batchbuffer *batch,
                              size_t dwords)
{
   return dwords * 4 <= i915_winsys_batchbuffer_space(batch);
}

static inline void
i915_winsys_batchbuffer_dword_unchecked(struct i915_winsys_batchbuffer *batch,
                                        unsigned dword)
{
   *(unsigned *)batch->ptr = dword;
   batch->ptr += 4;
}

static inline void
i915_winsys_batchbuffer_float(struct i915_winsys_batchbuffer *batch, float f)
{
   ...
   assert(i915_winsys_batchbuffer_space(batch) >= 4);
[all …]
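Typical usage pairs these helpers: check space for a whole packet once, then emit its dwords unchecked. A sketch of that pattern with the header's real helpers; the packet layout is illustrative:

```c
/* Sketch: emit a 2-dword packet (header + float payload). */
#include <assert.h>

static void
emit_packet_sketch(struct i915_winsys_batchbuffer *batch,
                   unsigned header, float value)
{
   /* One capacity check covers both dwords. */
   assert(i915_winsys_batchbuffer_check(batch, 2));

   i915_winsys_batchbuffer_dword_unchecked(batch, header);

   /* Floats go into the cmdstream bit-for-bit; a union avoids
    * strict-aliasing trouble (the float helper does the same). */
   union { float f; unsigned ui; } uif = { .f = value };
   i915_winsys_batchbuffer_dword_unchecked(batch, uif.ui);
}
```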
|