| /external/mesa3d/src/gallium/drivers/freedreno/ |
| D | freedreno_batch.c | * SPDX-License-Identifier: MIT · alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags) · struct fd_context *ctx = batch->ctx; · * have no option but to allocate large worst-case sizes so that · if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) && · return fd_submit_new_ringbuffer(batch->submit, sz, flags); · subpass_create(struct fd_batch *batch) · subpass->draw = alloc_ring(batch, 0x100000, 0); · /* Replace batch->draw with reference to current subpass, for · if (batch->draw) [all …] |
|
| D | freedreno_draw.c | * SPDX-License-Identifier: MIT · batch_references_resource(struct fd_batch *batch, struct pipe_resource *prsc) · return fd_batch_references_resource(batch, fd_resource(prsc)); · resource_read(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt · fd_batch_resource_read(batch, fd_resource(prsc)); · resource_written(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt · fd_batch_resource_write(batch, fd_resource(prsc)); · batch_draw_tracking_for_dirty_bits(struct fd_batch *batch) assert_dt · struct fd_context *ctx = batch->ctx; · struct pipe_framebuffer_state *pfb = &batch->framebuffer; [all …] |
|
| D | freedreno_batch_cache.c | * SPDX-License-Identifier: MIT · * The batch cache provides lookup for mapping pipe_framebuffer_state to a batch. · * Batch Cache hashtable key: · * Batch: · * Each batch needs to hold a reference to each resource it depends on (ie. · * When a resource is destroyed, we need to remove entries in the batch · * When a batch has weak reference to no more resources (ie. all the surfaces it rendered to are destroyed) the batch can be destroyed. · * surfaces are destroyed before the batch is submitted. (see the sketch below) [all …] |
|
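The freedreno_batch_cache.c entry above describes a hash table keyed on framebuffer state that maps to a batch, with weak references so cache entries can be pruned when the surfaces a batch rendered to are destroyed. The following is a minimal sketch of that lookup pattern in plain C; the key layout, the hash function, and every name here are assumptions for illustration rather than the driver's actual code.

```c
/* Minimal sketch of a framebuffer-state -> batch cache (hypothetical names). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SURFACES 4
#define CACHE_SLOTS  16

struct fb_key {                          /* stand-in for the hashtable key */
   const void *surfaces[MAX_SURFACES];   /* weak references: not refcounted */
   uint16_t width, height;
};

struct batch {
   struct fb_key key;
   int in_use;
};

static struct batch cache[CACHE_SLOTS];

static uint32_t key_hash(const struct fb_key *k)
{
   /* FNV-1a over the key bytes; the real driver hashes its own key struct. */
   const uint8_t *p = (const uint8_t *)k;
   uint32_t h = 2166136261u;
   for (size_t i = 0; i < sizeof(*k); i++)
      h = (h ^ p[i]) * 16777619u;
   return h;
}

/* Return the batch bound to this framebuffer state, creating one on a miss. */
static struct batch *batch_from_fb(const struct fb_key *k)
{
   uint32_t slot = key_hash(k) % CACHE_SLOTS;
   struct batch *b = &cache[slot];
   if (b->in_use && memcmp(&b->key, k, sizeof(*k)) == 0)
      return b;                 /* hit: keep accumulating draws in this batch */
   b->key = *k;                 /* miss (or collision): recycle the slot */
   b->in_use = 1;
   return b;
}

int main(void)
{
   struct fb_key k;
   memset(&k, 0, sizeof(k));
   k.surfaces[0] = (const void *)0x1;
   k.width = 64;
   k.height = 64;

   struct batch *a = batch_from_fb(&k);
   struct batch *b = batch_from_fb(&k);
   printf("same batch for same fb state: %s\n", a == b ? "yes" : "no");
   return 0;
}
```

The real cache additionally has to flush or evict batches on collisions and on resource destruction, which this sketch skips.
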
| /external/mesa3d/src/gallium/drivers/iris/ |
| D | iris_batch.c | * collect into a "batch buffer". Typically, many draw calls are grouped into a single batch to amortize command submission overhead. · * virtual memory address before executing our batch. If a BO is not in · iris_batch_reset(struct iris_batch *batch); · iris_batch_num_fences(struct iris_batch *batch) · return util_dynarray_num_elements(&batch->exec_fences, · iris_dump_fence_list(struct iris_batch *batch) · fprintf(stderr, "Fence list (length %u): ", iris_batch_num_fences(batch)); · util_dynarray_foreach(&batch->exec_fences, struct iris_batch_fence, f) { · (f->flags & IRIS_BATCH_FENCE_WAIT) ? "..." : "", [all …] |
|
| D | iris_batch.h | /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END or 12 · /* Our target batch size - flush approximately at this point. */ · #define BATCH_SZ (128 * 1024 - BATCH_RESERVED) · /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */ · /** Size of the primary batch being submitted to execbuf (in bytes). */ · /** A list of all BOs referenced by this batch */ · /** Bitset of whether this batch writes to BO `i'. */ · /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first · * A list of iris_syncobjs associated with this batch. · * The first list entry will always be a signalling sync-point, indicating (see the sketch below) [all …] |
|
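The iris_batch.h snippet above sizes the batch as a 128 KiB target minus a reserved tail so the terminating MI_BATCH_BUFFER_END always fits, and crocus_batch.c in the next group does the same with a per-platform reserve. Below is a hedged C sketch of that accounting; the struct and helper names are invented, only the constants are taken from the snippets.

```c
/* Sketch of "target batch size minus reserved tail" accounting (hypothetical names). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_RESERVED 16                       /* room for MI_BATCH_BUFFER_END etc. */
#define BATCH_SZ (128 * 1024 - BATCH_RESERVED)  /* flush approximately at this point */

struct batch {
   uint32_t *map;       /* CPU mapping of the batch buffer object */
   uint32_t used;       /* bytes of commands emitted so far */
};

static void batch_flush(struct batch *b)
{
   /* A real driver would write MI_BATCH_BUFFER_END into the reserved tail and
    * submit the buffer to the kernel; here we just reset the write pointer. */
   printf("flushing %u bytes\n", b->used);
   b->used = 0;
}

/* Ensure there is room for `bytes` of new commands, flushing if needed. */
static void batch_require_space(struct batch *b, uint32_t bytes)
{
   if (b->used + bytes > BATCH_SZ)
      batch_flush(b);
}

static void batch_emit(struct batch *b, uint32_t dword)
{
   batch_require_space(b, sizeof(dword));
   b->map[b->used / 4] = dword;
   b->used += sizeof(dword);
}

int main(void)
{
   struct batch b = { .map = malloc(BATCH_SZ + BATCH_RESERVED), .used = 0 };
   for (int i = 0; i < 40000; i++)       /* ~160 KB of dwords forces one flush */
      batch_emit(&b, 0xdeadbeef);
   batch_flush(&b);
   free(b.map);
   return 0;
}
```
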
| /external/mesa3d/src/gallium/drivers/crocus/ |
| D | crocus_batch.c | * collect into a "batch buffer". Typically, many draw calls are grouped into a single batch to amortize command submission overhead. · * virtual memory address before executing our batch. If a BO is not in · #include "drm-uapi/i915_drm.h" · /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END · #define BATCH_RESERVED(devinfo) ((devinfo)->platform == INTEL_PLATFORM_HSW ? 32 : 16) · static void crocus_batch_reset(struct crocus_batch *batch); · num_fences(struct crocus_batch *batch) · return util_dynarray_num_elements(&batch->exec_fences, · dump_fence_list(struct crocus_batch *batch) [all …] |
|
| D | crocus_batch.h | #include "drm-uapi/i915_drm.h" · /* Our target batch size - flush approximately at this point. */ · /** What batch is this? (e.g. CROCUS_BATCH_RENDER/COMPUTE) */ · /** Size of the primary batch if we've moved on to a secondary. */ · /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first · * A list of crocus_syncobjs associated with this batch. · * The first list entry will always be a signalling sync-point, indicating that this batch has completed. The others are likely to be sync-points to wait on before executing the batch. · /** A seqno (and syncobj) for the last batch that was submitted. */ [all …] |
|
| /external/mesa3d/src/gallium/drivers/panfrost/ |
| D | pan_job.c | * Copyright (C) 2019-2020 Collabora, Ltd. · * Copyright (C) 2014-2017 Broadcom · BITSET_FOREACH_SET(idx, ctx->batches.active, PAN_MAX_BATCHES) · panfrost_batch_idx(struct panfrost_batch *batch) · return batch - batch->ctx->batches.slots; · /* Adds the BO backing surface to a batch if the surface is non-null */ · panfrost_batch_add_surface(struct panfrost_batch *batch, · struct panfrost_resource *rsrc = pan_resource(surf->texture); · pan_legalize_format(batch->ctx, rsrc, surf->format, true, false); · panfrost_batch_write_rsrc(batch, rsrc, PIPE_SHADER_FRAGMENT); (see the sketch below) [all …] |
|
| D | pan_jm.c | #include "drm-uapi/panfrost_drm.h" · GENX(jm_init_batch)(struct panfrost_batch *batch) · batch->framebuffer = · pan_pool_alloc_desc(&batch->pool.base, FRAMEBUFFER); · &batch->pool.base, PAN_DESC(FRAMEBUFFER), PAN_DESC(ZS_CRC_EXTENSION), PAN_DESC_ARRAY(MAX2(batch->key.nr_cbufs, 1), RENDER_TARGET)); · if (!batch->framebuffer.gpu) return -1; · batch->tls = pan_pool_alloc_desc(&batch->pool.base, LOCAL_STORAGE); · batch->tls = batch->framebuffer; [all …] |
|
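pan_job.c keeps batches in a fixed array of slots and tracks the live ones with a bitset, so panfrost_batch_idx() is just pointer subtraction against the slot array (agx_batch.c in the asahi group below uses the same scheme). Here is a self-contained C sketch of that bookkeeping, with invented names and a plain integer bitmask standing in for Mesa's BITSET helpers.

```c
/* Fixed batch-slot pool with an "active" bitset (illustrative names only). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BATCHES 8

struct batch { int seqnum; };

struct context {
   struct batch slots[MAX_BATCHES];
   uint8_t active;                        /* bit i set => slots[i] is in flight */
};

static unsigned batch_idx(const struct context *ctx, const struct batch *b)
{
   return (unsigned)(b - ctx->slots);     /* same trick as panfrost_batch_idx() */
}

static struct batch *batch_alloc(struct context *ctx, int seqnum)
{
   for (unsigned i = 0; i < MAX_BATCHES; i++) {
      if (!(ctx->active & (1u << i))) {
         ctx->active |= 1u << i;
         ctx->slots[i].seqnum = seqnum;
         return &ctx->slots[i];
      }
   }
   return NULL;                           /* caller would flush the oldest batch */
}

static void batch_retire(struct context *ctx, struct batch *b)
{
   ctx->active &= ~(1u << batch_idx(ctx, b));
}

int main(void)
{
   struct context ctx;
   memset(&ctx, 0, sizeof(ctx));
   struct batch *a = batch_alloc(&ctx, 1);
   struct batch *b = batch_alloc(&ctx, 2);
   printf("a=%u b=%u\n", batch_idx(&ctx, a), batch_idx(&ctx, b));
   batch_retire(&ctx, a);
   printf("active mask after retiring a: 0x%x\n", ctx.active);
   return 0;
}
```
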
| /external/XNNPACK/test/ |
| D | s16-rmaxabs.cc | // This source code is licensed under the BSD-style license found in the · // Auto-generated file. Do not edit! · // Specification: test/s16-rmaxabs.yaml · // Generator: tools/generate-rmaxabs-test.py · #include <xnnpack/isa-checks.h> · #include "rmaxabs-microkernel-tester.h" · .batch(8) · for (size_t batch = 16; batch < 80; batch += 8) { · .batch(batch) · for (size_t batch = 1; batch < 8; batch++) { (see the sketch below) [all …] |
|
| D | s16-vlshift.cc | // This source code is licensed under the BSD-style license found in the · // Auto-generated file. Do not edit! · // Specification: test/s16-vlshift.yaml · // Generator: tools/generate-vlshift-test.py · #include <xnnpack/isa-checks.h> · #include "vlshift-microkernel-tester.h" · .batch(8) · for (size_t batch = 16; batch < 80; batch += 8) { · .batch(batch) · for (size_t batch = 1; batch < 8; batch++) { [all …] |
|
| D | u32-vlog.cc | // This source code is licensed under the BSD-style license found in the · // Auto-generated file. Do not edit! · // Specification: test/u32-vlog.yaml · // Generator: tools/generate-vlog-test.py · #include <xnnpack/isa-checks.h> · #include "vlog-microkernel-tester.h" · .batch(1) · for (size_t batch = 2; batch < 10; batch++) { · .batch(batch) · .batch(1) [all …] |
|
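The generated XNNPACK tests above all follow one shape: run the micro-kernel at its native batch tile (.batch(8) or .batch(1)), then at multiples of the tile, then at partial tiles below it. Without pulling in XNNPACK or GoogleTest, the C sketch below mirrors that sweep against a trivial reference rmaxabs (maximum absolute value over a batch of int16 values); the kernel and the checking loop are stand-ins, not XNNPACK's tester API.

```c
/* Batch-size sweep in the style of the generated s16-rmaxabs tests (stand-in code). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in "micro-kernel": reduce |x[i]| over the batch to a single maximum. */
static uint16_t rmaxabs(size_t batch, const int16_t *x)
{
   uint16_t max = 0;
   for (size_t i = 0; i < batch; i++) {
      uint16_t v = (uint16_t)(x[i] < 0 ? -x[i] : x[i]);
      if (v > max)
         max = v;
   }
   return max;
}

/* Check one batch size: compute the expected maximum inline and compare. */
static void test_batch(size_t batch)
{
   int16_t *x = malloc(batch * sizeof(*x));
   uint16_t expected = 0;
   for (size_t i = 0; i < batch; i++) {
      x[i] = (int16_t)(rand() % 65536 - 32768);
      uint16_t v = (uint16_t)(x[i] < 0 ? -x[i] : x[i]);
      if (v > expected)
         expected = v;
   }
   assert(rmaxabs(batch, x) == expected);
   free(x);
}

int main(void)
{
   test_batch(8);                                   /* batch == tile size */
   for (size_t batch = 16; batch < 80; batch += 8)  /* multiples of the tile */
      test_batch(batch);
   for (size_t batch = 1; batch < 8; batch++)       /* partial tiles */
      test_batch(batch);
   printf("all batch sizes passed\n");
   return 0;
}
```
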
| /external/mesa3d/src/gallium/drivers/d3d12/ |
| D | d3d12_batch.cpp | return _mesa_hash_data(table->descs, sizeof(table->descs[0]) * table->count); (in d3d12_sampler_desc_table_key_hash()) · return table_a->count == table_b->count && memcmp(table_a->descs, table_b->descs, sizeof(table_a->… (in d3d12_sampler_desc_table_key_equals()) · d3d12_init_batch(struct d3d12_context *ctx, struct d3d12_batch *batch) · struct d3d12_screen *screen = d3d12_screen(ctx->base.screen); · batch->bos = _mesa_hash_table_create(NULL, _mesa_hash_pointer, · util_dynarray_init(&batch->local_bos, NULL); · batch->surfaces = _mesa_set_create(NULL, _mesa_hash_pointer, · batch->objects = _mesa_set_create(NULL, · if (!batch->bos || !batch->surfaces || !batch->objects) · if (screen->max_feature_level >= D3D_FEATURE_LEVEL_11_0) { [all …] |
|
| /external/mesa3d/src/gallium/drivers/asahi/ |
| D | agx_batch.c | * Copyright 2019-2020 Collabora, Ltd. · * SPDX-License-Identifier: MIT · BITSET_FOREACH_SET(idx, ctx->batches.active, AGX_MAX_BATCHES) · BITSET_FOREACH_SET(idx, ctx->batches.submitted, AGX_MAX_BATCHES) · #define batch_debug(batch, fmt, ...) \ · if (unlikely(agx_device(batch->ctx->base.screen)->debug & \ · agx_msg("[Queue %u Batch %u] " fmt "\n", batch->ctx->queue_id, \ agx_batch_idx(batch), ##__VA_ARGS__); \ · agx_batch_is_active(struct agx_batch *batch) · return BITSET_TEST(batch->ctx->batches.active, agx_batch_idx(batch)); [all …] |
|
| /external/igt-gpu-tools/lib/ |
| D | rendercopy_gen4.c | #define GEN4_GRF_BLOCKS(nreg) (((nreg) + 15) / 16 - 1) · batch_used(struct intel_batchbuffer *batch) · return batch->ptr - batch->buffer; · batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) · uint32_t offset = batch_used(batch); · offset = (offset + divisor - 1) / divisor * divisor; batch->ptr = batch->buffer + offset; · gen4_render_flush(struct intel_batchbuffer *batch, · ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); · ret = drm_intel_gem_bo_context_exec(batch->bo, context, [all …] |
|
| D | media_fill.c | * (comment layout diagram: a 4096-byte buffer with the batch in the lower region below a state split near offset 2048) · gen7_media_fillfunc(struct intel_batchbuffer *batch, · intel_batchbuffer_flush(batch); · batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; · curbe_buffer = gen7_fill_curbe_buffer_data(batch, color); · interface_descriptor = gen7_fill_interface_descriptor(batch, dst, · igt_assert(batch->ptr < &batch->buffer[4095]); [all …] |
|
| D | rendercopy_gen6.c | batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) · uint32_t offset = batch->ptr - batch->buffer; · offset = (offset + divisor-1) / divisor * divisor; batch->ptr = batch->buffer + offset; · gen6_render_flush(struct intel_batchbuffer *batch, · ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); · ret = drm_intel_gem_bo_context_exec(batch->bo, context, · gen6_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf, · igt_assert_lte(buf->stride, 128*1024); · ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32); [all …] |
|
| D | rendercopy_gen7.c | gen7_render_flush(struct intel_batchbuffer *batch, · ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); · ret = drm_intel_gem_bo_context_exec(batch->bo, context, · gen7_bind_buf(struct intel_batchbuffer *batch, · igt_assert_lte(buf->stride, 256*1024); · switch (buf->bpp) { · ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32); · gen7_tiling_bits(buf->tiling) | · ss[1] = buf->bo->offset; · ss[2] = ((igt_buf_width(buf) - 1) << GEN7_SURFACE_WIDTH_SHIFT | [all …] |
|
| D | intel_batchbuffer.c | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. · * @title: Batch Buffer · * structure called batch is in scope. The basic macros are #BEGIN_BATCH, · * Note that this library's header pulls in the [i-g-t core](igt-gpu-tools-i-g-t-core.html) · * @batch: batchbuffer object · * Aligns the current in-batch offset to the given value. · intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align) · uint32_t offset = batch->ptr - batch->buffer; · batch->ptr = batch->buffer + offset; · * @batch: batchbuffer object (see the sketch below) [all …] |
|
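Several of the IGT files above (rendercopy_gen4.c, rendercopy_gen6.c, intel_batchbuffer.c) carry the same small helper: round the current in-batch offset up to an alignment boundary before emitting state with alignment requirements, using (offset + align - 1) / align * align. Below is a minimal C restatement with a toy batch structure standing in for struct intel_batchbuffer.

```c
/* Round the in-batch offset up to an alignment boundary (toy structures). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct batchbuffer {
   uint8_t buffer[4096];
   uint8_t *ptr;            /* current write position inside buffer[] */
};

static uint32_t batch_used(const struct batchbuffer *b)
{
   return (uint32_t)(b->ptr - b->buffer);
}

/* Same arithmetic as batch_round_upto()/intel_batchbuffer_align():
 * (offset + align - 1) / align * align rounds offset up to a multiple of align. */
static void batch_align(struct batchbuffer *b, uint32_t align)
{
   uint32_t offset = batch_used(b);
   offset = (offset + align - 1) / align * align;
   assert(offset <= sizeof(b->buffer));
   b->ptr = b->buffer + offset;
}

int main(void)
{
   struct batchbuffer b;
   b.ptr = b.buffer + 13;       /* pretend 13 bytes of commands were emitted */
   batch_align(&b, 32);
   printf("offset after aligning to 32: %u\n", batch_used(&b));  /* prints 32 */
   return 0;
}
```
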
| /external/tensorflow/tensorflow/core/kernels/batching_util/ |
| D | batch_scheduler_test.cc | http://www.apache.org/licenses/LICENSE-2.0 · Batch<FakeTask> batch; · EXPECT_EQ(0, batch.num_tasks()); EXPECT_TRUE(batch.empty()); EXPECT_EQ(0, batch.size()); EXPECT_FALSE(batch.IsClosed()); · batch.AddTask(std::unique_ptr<FakeTask>(task0)); · EXPECT_EQ(1, batch.num_tasks()); EXPECT_FALSE(batch.empty()); EXPECT_EQ(task0->size(), batch.size()); (see the sketch below) [all …] |
|
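The TensorFlow test above exercises Batch<FakeTask>: a batch starts empty, its size() is the sum of the sizes of the tasks added so far, and it can eventually be closed. The plain-C sketch below restates that invariant (the real class is a C++ template in batch_scheduler.h); the struct layout and limits are illustrative only.

```c
/* A task batch that sums task sizes, in the spirit of Batch<FakeTask> (illustrative). */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_TASKS 32

struct task { size_t size; };            /* e.g. number of tensor rows in the task */

struct batch {
   struct task tasks[MAX_TASKS];
   size_t num_tasks;
   int closed;
};

static size_t batch_size(const struct batch *b)
{
   size_t total = 0;
   for (size_t i = 0; i < b->num_tasks; i++)
      total += b->tasks[i].size;          /* size() is the sum of task sizes */
   return total;
}

static void batch_add_task(struct batch *b, struct task t)
{
   assert(!b->closed && b->num_tasks < MAX_TASKS);
   b->tasks[b->num_tasks++] = t;
}

int main(void)
{
   struct batch b = { .num_tasks = 0, .closed = 0 };
   assert(b.num_tasks == 0 && batch_size(&b) == 0 && !b.closed);

   batch_add_task(&b, (struct task){ .size = 3 });
   assert(b.num_tasks == 1 && batch_size(&b) == 3);

   batch_add_task(&b, (struct task){ .size = 7 });
   assert(batch_size(&b) == 10);

   b.closed = 1;                           /* analogous to batch.Close() */
   printf("closed batch with %zu tasks, size %zu\n", b.num_tasks, batch_size(&b));
   return 0;
}
```
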
| /external/mesa3d/src/gallium/winsys/i915/drm/ |
| D | i915_drm_batchbuffer.c | #include "drm-uapi/i915_drm.h" · i915_drm_batchbuffer(struct i915_winsys_batchbuffer *batch) · return (struct i915_drm_batchbuffer *)batch; · i915_drm_batchbuffer_reset(struct i915_drm_batchbuffer *batch) · struct i915_drm_winsys *idws = i915_drm_winsys(batch->base.iws); · if (batch->bo) drm_intel_bo_unreference(batch->bo); · batch->bo = drm_intel_bo_alloc(idws->gem_manager, · batch->actual_size, · memset(batch->base.map, 0, batch->actual_size); [all …] |
|
| /external/rust/android-crates-io/crates/grpcio-sys/grpc/src/core/lib/transport/ |
| D | batch_builder.h | // http://www.apache.org/licenses/LICENSE-2.0 · // Build up a transport stream op batch for a stream for a promise based · // cancellation batch instead of a trailing metadata op in a coalesced batch. · // paired with an initial op batch, and the transports would wait for the initial metadata batch to arrive (in case of reordering up the stack). · // Error => non-ok status · struct Batch; · explicit PendingCompletion(RefCountedPtr<Batch> batch); · RefCountedPtr<Batch> batch; (member) · return GetContext<Arena>()->MakePooled<Message>(std::move(*payload), (in IntoMessageHandle()) [all …] |
|
| D | batch_builder.cc | // http://www.apache.org/licenses/LICENSE-2.0 · auto* party = pc->batch->party.get(); (in CompletionCallback()) · GPR_DEBUG, "%sFinish batch-component %s for %s: status=%s", pc->batch->DebugPrefix(party).c_str(), std::string(pc->name()).c_str(), grpc_transport_stream_op_batch_string(&pc->batch->batch, false).c_str(), · party->Spawn( "batch-completion", · RefCountedPtr<Batch> batch = std::exchange(pc->batch, nullptr); pc->done_latch.Set(std::move(error)); · BatchBuilder::PendingCompletion::PendingCompletion(RefCountedPtr<Batch> batch) [all …] |
|
| /external/grpc-grpc/src/core/lib/transport/ |
| D | batch_builder.h | // http://www.apache.org/licenses/LICENSE-2.0 · // Build up a transport stream op batch for a stream for a promise based · // cancellation batch instead of a trailing metadata op in a coalesced batch. · // paired with an initial op batch, and the transports would wait for the initial metadata batch to arrive (in case of reordering up the stack). · // Error => non-ok status · struct Batch; · explicit PendingCompletion(RefCountedPtr<Batch> batch); · RefCountedPtr<Batch> batch; (member) · return GetContext<Arena>()->MakePooled<Message>(std::move(*payload), (in IntoMessageHandle()) (see the sketch below) [all …] |
|
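Both copies of batch_builder.{h,cc} above (the vendored crate under grpcio-sys and the grpc-grpc tree) show the same hand-off: a PendingCompletion owns a reference-counted pointer to its Batch, and when the transport's completion callback fires it takes that reference back, sets a latch with the resulting status, and drops the batch. The C sketch below imitates that ownership transfer with a manual refcount; none of these names are gRPC's API.

```c
/* Ownership hand-off for a completed batch, with a manual refcount (not gRPC's API). */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct batch {
   int refs;
   const char *name;
};

static struct batch *batch_ref(struct batch *b) { b->refs++; return b; }

static void batch_unref(struct batch *b)
{
   if (--b->refs == 0) {
      printf("batch '%s' destroyed\n", b->name);
      free(b);
   }
}

struct pending_completion {
   struct batch *batch;       /* owned reference, like RefCountedPtr<Batch> batch */
   int done;                  /* stand-in for done_latch */
   int status;                /* 0 = ok, non-zero = error */
};

/* The transport calls this when the op batch component finishes. */
static void completion_callback(struct pending_completion *pc, int status)
{
   struct batch *b = pc->batch;    /* take the reference out of the completion */
   pc->batch = NULL;
   pc->status = status;
   pc->done = 1;                   /* like done_latch.Set(std::move(error)) */
   batch_unref(b);                 /* the batch may die here if this was the last ref */
}

int main(void)
{
   struct batch *b = malloc(sizeof(*b));
   b->refs = 1;
   b->name = "send_message";

   struct pending_completion pc = { .batch = batch_ref(b), .done = 0, .status = 0 };
   batch_unref(b);                 /* the builder drops its own reference */

   completion_callback(&pc, 0);
   assert(pc.done && pc.status == 0 && pc.batch == NULL);
   return 0;
}
```
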
| /external/igt-gpu-tools/tools/null_state_gen/ |
| D | intel_batchbuffer.c | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. · assert(a->num_items < MAX_ITEMS); item = &a->item[a->num_items]; (in bb_area_emit()) · item->data = dword; item->type = type; strncpy(item->str, str, MAX_STRLEN); item->str[MAX_STRLEN - 1] = 0; · a->num_items++; · assert(a->num_items < MAX_ITEMS); assert(i < a->num_items); (in bb_area_emit_offset()) (see the sketch below) [all …] |
|
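The null_state_gen copy of intel_batchbuffer.c builds its batch as an array of annotated items: each emitted dword is stored with a type tag and a bounded, NUL-terminated description string, guarded by an item-count assert. A short C sketch of that pattern, with made-up limits and enum values:

```c
/* Annotated dword emission into a bounded item array (illustrative limits/enums). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_ITEMS  64
#define MAX_STRLEN 32

enum item_type { UNINITIALIZED, CMD, STATE_OFFSET };

struct bb_item {
   uint32_t data;
   enum item_type type;
   char str[MAX_STRLEN];
};

struct bb_area {
   struct bb_item item[MAX_ITEMS];
   unsigned num_items;
};

static void bb_area_emit(struct bb_area *a, uint32_t dword,
                         enum item_type type, const char *str)
{
   assert(a->num_items < MAX_ITEMS);
   struct bb_item *item = &a->item[a->num_items];

   item->data = dword;
   item->type = type;
   strncpy(item->str, str, MAX_STRLEN);
   item->str[MAX_STRLEN - 1] = 0;      /* strncpy does not always NUL-terminate */

   a->num_items++;
}

int main(void)
{
   struct bb_area a = { .num_items = 0 };
   bb_area_emit(&a, 0x05000000, CMD, "MI_BATCH_BUFFER_END");
   bb_area_emit(&a, 0x0, STATE_OFFSET, "null surface state");
   for (unsigned i = 0; i < a.num_items; i++)
      printf("%2u: 0x%08x (%s)\n", i, a.item[i].data, a.item[i].str);
   return 0;
}
```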