
Searched +full:- +full:- +full:batch (Results 1 – 25 of 1261) sorted by relevance


/external/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch.c
39 alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags) in alloc_ring() argument
41 struct fd_context *ctx = batch->ctx; in alloc_ring()
44 * have no option but to allocate large worst-case sizes so that in alloc_ring()
51 if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) && in alloc_ring()
57 return fd_submit_new_ringbuffer(batch->submit, sz, flags); in alloc_ring()
61 batch_init(struct fd_batch *batch) in batch_init() argument
63 struct fd_context *ctx = batch->ctx; in batch_init()
65 batch->submit = fd_submit_new(ctx->pipe); in batch_init()
66 if (batch->nondraw) { in batch_init()
67 batch->gmem = alloc_ring(batch, 0x1000, FD_RINGBUFFER_PRIMARY); in batch_init()
[all …]
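
A note on the truncated comment above: on kernels that do not report FD_VERSION_UNLIMITED_CMDS, the command ring cannot grow, so the driver must reserve a worst-case size up front. A minimal sketch of that sizing decision, with hypothetical names (pick_ring_size and WORST_CASE_SZ are illustrative, not freedreno API):

#include <stdbool.h>
#include <stdint.h>

#define WORST_CASE_SZ (1024 * 1024)  /* assumed worst case, for illustration */

static uint32_t
pick_ring_size(bool kernel_supports_unlimited_cmds, uint32_t requested_sz)
{
   if (kernel_supports_unlimited_cmds)
      return requested_sz;   /* ring can grow later; start small */
   return WORST_CASE_SZ;     /* fixed-size ring: reserve the maximum */
}
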
freedreno_draw.c
46 resource_read(struct fd_batch *batch, struct pipe_resource *prsc) in resource_read() argument
50 fd_batch_resource_read(batch, fd_resource(prsc)); in resource_read()
54 resource_written(struct fd_batch *batch, struct pipe_resource *prsc) in resource_written() argument
58 fd_batch_resource_write(batch, fd_resource(prsc)); in resource_written()
62 batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info) in batch_draw_tracking() argument
64 struct fd_context *ctx = batch->ctx; in batch_draw_tracking()
65 struct pipe_framebuffer_state *pfb = &batch->framebuffer; in batch_draw_tracking()
68 /* NOTE: needs to be before resource_written(batch->query_buf), otherwise in batch_draw_tracking()
71 fd_batch_set_stage(batch, FD_STAGE_DRAW); in batch_draw_tracking()
77 fd_screen_lock(ctx->screen); in batch_draw_tracking()
[all …]
/external/mesa3d/src/gallium/drivers/iris/
iris_batch.c
29 * collect into a "batch buffer". Typically, many draw calls are grouped
30 * into a single batch to amortize command submission overhead.
36 * virtual memory address before executing our batch. If a BO is not in
45 #include "drm-uapi/i915_drm.h"
68 iris_batch_reset(struct iris_batch *batch);
71 num_fences(struct iris_batch *batch) in num_fences() argument
73 return util_dynarray_num_elements(&batch->exec_fences, in num_fences()
81 dump_fence_list(struct iris_batch *batch) in dump_fence_list() argument
83 fprintf(stderr, "Fence list (length %u): ", num_fences(batch)); in dump_fence_list()
85 util_dynarray_foreach(&batch->exec_fences, in dump_fence_list()
[all …]
iris_batch.h
33 #include "drm-uapi/i915_drm.h"
44 /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
50 /* Our target batch size - flush approximately at this point. */
51 #define BATCH_SZ (64 * 1024 - BATCH_RESERVED)
65 /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
73 /** Size of the primary batch being submitted to execbuf (in bytes). */
90 /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first
96 * A list of iris_syncobjs associated with this batch.
98 * The first list entry will always be a signalling sync-point, indicating
99 * that this batch has completed. The others are likely to be sync-points
[all …]
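
The iris_batch.h comments above describe a flush threshold: BATCH_SZ is 64KB minus BATCH_RESERVED, so the terminating MI_BATCH_BUFFER_END always fits. A sketch of how such a check could look, assuming a hypothetical require_space() helper (not the actual iris function):

#include <stddef.h>
#include <stdint.h>

#define BATCH_RESERVED 16                      /* assumed reservation, for illustration */
#define BATCH_SZ (64 * 1024 - BATCH_RESERVED)  /* flush approximately at this point */

struct batch {
   uint8_t *map;       /* start of the mapped batch buffer */
   uint8_t *map_next;  /* next free byte */
};

extern void flush_batch(struct batch *b);      /* submits and resets the batch */

static void
require_space(struct batch *b, size_t bytes)
{
   /* Flush early so the terminating MI_BATCH_BUFFER_END always fits. */
   if ((size_t)(b->map_next - b->map) + bytes > BATCH_SZ)
      flush_batch(b);
}
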
/external/mesa3d/src/mesa/drivers/dri/i965/
intel_batchbuffer.c
40 #include "drm-uapi/i915_drm.h"
45 * Target sizes of the batch and state buffers. We create the initial
50 * should flush. Each time we flush the batch, we recreate both buffers
62 dump_validation_list(struct intel_batchbuffer *batch) in dump_validation_list() argument
64 fprintf(stderr, "Validation list (length %d):\n", batch->exec_count); in dump_validation_list()
66 for (int i = 0; i < batch->exec_count; i++) { in dump_validation_list()
67 uint64_t flags = batch->validation_list[i].flags; in dump_validation_list()
68 assert(batch->validation_list[i].handle == in dump_validation_list()
69 batch->exec_bos[i]->gem_handle); in dump_validation_list()
70 fprintf(stderr, "[%2d]: %2d %-14s %p %s%-7s @ 0x%"PRIx64"%s (%"PRIu64"B)\n", in dump_validation_list()
[all …]
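
dump_validation_list() above asserts that entry i of the execbuf validation list names the same GEM handle as exec_bos[i]. A self-contained sketch of that invariant check, with illustrative struct layouts (not the real drm i915 uapi types):

#include <assert.h>
#include <stdint.h>

struct exec_entry { uint32_t handle; uint64_t flags; };
struct bo { uint32_t gem_handle; };

static void
check_validation_list(const struct exec_entry *list, struct bo *const *bos, int count)
{
   for (int i = 0; i < count; i++)
      assert(list[i].handle == bos[i]->gem_handle);  /* entry i must match exec_bos[i] */
}
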
gen4_blorp_exec.h
25 dynamic_state_address(struct blorp_batch *batch, uint32_t offset) in dynamic_state_address() argument
27 assert(batch->blorp->driver_ctx == batch->driver_batch); in dynamic_state_address()
28 struct brw_context *brw = batch->driver_batch; in dynamic_state_address()
31 .buffer = brw->batch.state.bo, in dynamic_state_address()
37 instruction_state_address(struct blorp_batch *batch, uint32_t offset) in instruction_state_address() argument
39 assert(batch->blorp->driver_ctx == batch->driver_batch); in instruction_state_address()
40 struct brw_context *brw = batch->driver_batch; in instruction_state_address()
43 .buffer = brw->cache.bo, in instruction_state_address()
49 blorp_emit_vs_state(struct blorp_batch *batch) in blorp_emit_vs_state() argument
51 assert(batch->blorp->driver_ctx == batch->driver_batch); in blorp_emit_vs_state()
[all …]
genX_blorp_exec.c
42 blorp_emit_dwords(struct blorp_batch *batch, unsigned n) in blorp_emit_dwords() argument
44 assert(batch->blorp->driver_ctx == batch->driver_batch); in blorp_emit_dwords()
45 struct brw_context *brw = batch->driver_batch; in blorp_emit_dwords()
48 uint32_t *map = brw->batch.map_next; in blorp_emit_dwords()
49 brw->batch.map_next += n; in blorp_emit_dwords()
55 blorp_emit_reloc(struct blorp_batch *batch, in blorp_emit_reloc() argument
58 assert(batch->blorp->driver_ctx == batch->driver_batch); in blorp_emit_reloc()
59 struct brw_context *brw = batch->driver_batch; in blorp_emit_reloc()
62 if (GEN_GEN < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) { in blorp_emit_reloc()
63 offset = (char *)location - (char *)brw->batch.state.map; in blorp_emit_reloc()
[all …]
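
blorp_emit_dwords() above is a bump allocator over the mapped batch: return the current cursor, advance it by n dwords, and let the caller fill in the reserved space. The same pattern in a self-contained sketch (hypothetical types):

#include <stdint.h>

struct batch_map {
   uint32_t *map_next;  /* cursor into a mapped command buffer */
};

static uint32_t *
emit_dwords(struct batch_map *b, unsigned n)
{
   uint32_t *map = b->map_next;  /* caller writes its n dwords here */
   b->map_next += n;             /* bump the cursor past the reservation */
   return map;
}
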
/external/mesa3d/src/gallium/drivers/panfrost/
pan_job.c
2 * Copyright (C) 2019-2020 Collabora, Ltd.
4 * Copyright (C) 2014-2017 Broadcom
29 #include "drm-uapi/panfrost_drm.h"
42 #include "panfrost-quirks.h"
44 /* panfrost_bo_access is here to help us keep track of batch accesses to BOs
48 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
50 * When the last access is a write, the batch writing the BO might have read
53 * depend on another batch to push its results to memory. That's what the
55 * There can only be one writer at any given time, if a new batch wants to
57 * the old writer (at the batch level), and panfrost_bo_access->writer will be
[all …]
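
The pan_job.c comment describes classic reader/writer dependency tracking: at most one writer per BO, and any new access must first depend on the pending writer. A minimal sketch of that bookkeeping, assuming hypothetical types and a batch_add_dep() helper (the real driver keeps these entries in the accessed_bos hash table, and also tracks readers, omitted here):

#include <stdbool.h>

struct batch;

struct bo_access {
   struct batch *writer;  /* at most one writer at any given time */
   /* readers would be tracked in a set alongside this field */
};

extern void batch_add_dep(struct batch *b, struct batch *on);  /* hypothetical */

static void
bo_access_update(struct bo_access *acc, struct batch *b, bool write)
{
   if (acc->writer && acc->writer != b)
      batch_add_dep(b, acc->writer);  /* wait for the old writer's results */
   if (write)
      acc->writer = b;                /* b becomes the sole writer */
}
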
pan_mfbd.c
2 * Copyright (C) 2019-2020 Collabora, Ltd.
3 * Copyright 2018-2019 Alyssa Rosenzweig
30 #include "panfrost-quirks.h"
34 panfrost_mfbd_has_zs_crc_ext(struct panfrost_batch *batch) in panfrost_mfbd_has_zs_crc_ext() argument
36 if (batch->key.nr_cbufs == 1) { in panfrost_mfbd_has_zs_crc_ext()
37 struct pipe_surface *surf = batch->key.cbufs[0]; in panfrost_mfbd_has_zs_crc_ext()
38 struct panfrost_resource *rsrc = pan_resource(surf->texture); in panfrost_mfbd_has_zs_crc_ext()
40 if (rsrc->checksummed) in panfrost_mfbd_has_zs_crc_ext()
44 if (batch->key.zsbuf && in panfrost_mfbd_has_zs_crc_ext()
45 ((batch->clear | batch->draws) & PIPE_CLEAR_DEPTHSTENCIL)) in panfrost_mfbd_has_zs_crc_ext()
[all …]
/external/igt-gpu-tools/lib/
rendercopy_gen4.c
16 #define GEN4_GRF_BLOCKS(nreg) (((nreg) + 15) / 16 - 1)
83 batch_used(struct intel_batchbuffer *batch) in batch_used() argument
85 return batch->ptr - batch->buffer; in batch_used()
89 batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) in batch_round_upto() argument
91 uint32_t offset = batch_used(batch); in batch_round_upto()
93 offset = (offset + divisor - 1) / divisor * divisor; in batch_round_upto()
94 batch->ptr = batch->buffer + offset; in batch_round_upto()
124 gen4_render_flush(struct intel_batchbuffer *batch, in gen4_render_flush() argument
129 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen4_render_flush()
131 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen4_render_flush()
[all …]
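
batch_round_upto() above rounds the batch offset up to a multiple of divisor with the (x + d - 1) / d * d idiom, which works for any divisor; for power-of-two alignments the same result can be had with a mask. Both forms, for comparison:

#include <stdint.h>

static uint32_t
round_up(uint32_t x, uint32_t d)
{
   return (x + d - 1) / d * d;     /* any divisor d > 0 */
}

static uint32_t
align_pow2(uint32_t x, uint32_t a)
{
   return (x + a - 1) & ~(a - 1);  /* a must be a power of two */
}

/* e.g. round_up(100, 64) == 128, and align_pow2(100, 64) == 128 */
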
media_fill.c
107 * +---------------+ <---- 4096
113 * |_______|_______| <---- 2048 + ?
116 * | batch |
120 * +---------------+ <---- 0 + ?
133 gen7_media_fillfunc(struct intel_batchbuffer *batch, in gen7_media_fillfunc() argument
142 intel_batchbuffer_flush(batch); in gen7_media_fillfunc()
145 batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; in gen7_media_fillfunc()
147 curbe_buffer = gen7_fill_curbe_buffer_data(batch, color); in gen7_media_fillfunc()
148 interface_descriptor = gen7_fill_interface_descriptor(batch, dst, in gen7_media_fillfunc()
151 igt_assert(batch->ptr < &batch->buffer[4095]); in gen7_media_fillfunc()
[all …]
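
The media_fill.c diagram above shows a single 4096-byte buffer split in two: commands grow up from offset 0, while state objects are packed from BATCH_STATE_SPLIT (2048) toward 4096, which is why gen7_media_fillfunc() resets batch->ptr to &batch->buffer[BATCH_STATE_SPLIT] before emitting state. A sketch of that split allocation, with hypothetical names:

#include <assert.h>
#include <stdint.h>

#define BATCH_SIZE        4096
#define BATCH_STATE_SPLIT 2048

struct split_batch {
   uint8_t buffer[BATCH_SIZE];
   uint8_t *cmd_ptr;    /* commands grow up from buffer[0] */
   uint8_t *state_ptr;  /* state grows up from buffer[BATCH_STATE_SPLIT] */
};

static uint8_t *
alloc_state(struct split_batch *b, uint32_t size)
{
   uint8_t *ret = b->state_ptr;
   b->state_ptr += size;
   assert(b->state_ptr <= &b->buffer[BATCH_SIZE]);       /* state must fit */
   assert(b->cmd_ptr <= &b->buffer[BATCH_STATE_SPLIT]);  /* halves must not collide */
   return ret;
}
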
rendercopy_gen6.c
52 batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) in batch_round_upto() argument
54 uint32_t offset = batch->ptr - batch->buffer; in batch_round_upto()
56 offset = (offset + divisor-1) / divisor * divisor; in batch_round_upto()
57 batch->ptr = batch->buffer + offset; in batch_round_upto()
62 gen6_render_flush(struct intel_batchbuffer *batch, in gen6_render_flush() argument
67 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen6_render_flush()
69 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen6_render_flush()
75 gen6_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf, in gen6_bind_buf() argument
82 igt_assert_lte(buf->stride, 128*1024); in gen6_bind_buf()
93 ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32); in gen6_bind_buf()
[all …]
rendercopy_gen7.c
36 gen7_render_flush(struct intel_batchbuffer *batch, in gen7_render_flush() argument
41 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen7_render_flush()
43 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen7_render_flush()
60 gen7_bind_buf(struct intel_batchbuffer *batch, in gen7_bind_buf() argument
68 igt_assert_lte(buf->stride, 256*1024); in gen7_bind_buf()
72 switch (buf->bpp) { in gen7_bind_buf()
87 ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32); in gen7_bind_buf()
90 gen7_tiling_bits(buf->tiling) | in gen7_bind_buf()
92 ss[1] = buf->bo->offset; in gen7_bind_buf()
93 ss[2] = ((igt_buf_width(buf) - 1) << GEN7_SURFACE_WIDTH_SHIFT | in gen7_bind_buf()
[all …]
intel_batchbuffer.c
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
51 * @title: Batch Buffer
61 * structure called batch is in scope. The basic macros are #BEGIN_BATCH,
64 * Note that this library's header pulls in the [i-g-t core](igt-gpu-tools-i-g-t-core.html)
70 * @batch: batchbuffer object
73 * Aligns the current in-batch offset to the given value.
78 intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align) in intel_batchbuffer_align() argument
80 uint32_t offset = batch->ptr - batch->buffer; in intel_batchbuffer_align()
83 batch->ptr = batch->buffer + offset; in intel_batchbuffer_align()
89 * @batch: batchbuffer object
[all …]
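
The library docs above say the emission macros assume a structure called batch is in scope. A simplified sketch of what such a macro could look like (the real igt BEGIN_BATCH/OUT_BATCH also track a dword budget and relocations, omitted here):

#include <stdint.h>
#include <string.h>

struct intel_batchbuffer {
   uint8_t *buffer;
   uint8_t *ptr;
};

/* Appends one dword at the current batch pointer; like the igt macros,
 * it assumes a variable named `batch` is in scope. */
#define OUT_BATCH(dword) do {                  \
   uint32_t __d = (dword);                     \
   memcpy(batch->ptr, &__d, sizeof(__d));      \
   batch->ptr += 4;                            \
} while (0)

static void
example_emit(struct intel_batchbuffer *batch)
{
   OUT_BATCH(0x12345678);  /* one dword written, pointer advanced by 4 */
}
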
rendercopy_gen8.c
31 static void dump_batch(struct intel_batchbuffer *batch) { in dump_batch() argument
32 int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666); in dump_batch()
33 if (fd != -1) { in dump_batch()
34 igt_assert_eq(write(fd, batch->buffer, 4096), 4096); in dump_batch()
60 /* Write all -1 */
83 /* aub->annotations is an array keeping a list of annotations of the in annotation_init()
84 * batch buffer ordered by offset. aub->annotations[0] is thus left in annotation_init()
86 * the batch buffer with annotations_add_batch() */ in annotation_init()
87 aub->index = 1; in annotation_init()
94 a->type = type; in add_annotation()
[all …]
gpgpu_fill.c
93 * +---------------+ <---- 4096
99 * |_______|_______| <---- 2048 + ?
102 * | batch |
106 * +---------------+ <---- 0 + ?
120 gen7_gpgpu_fillfunc(struct intel_batchbuffer *batch, in gen7_gpgpu_fillfunc() argument
129 intel_batchbuffer_flush(batch); in gen7_gpgpu_fillfunc()
132 batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; in gen7_gpgpu_fillfunc()
139 curbe_buffer = gen7_fill_curbe_buffer_data(batch, color); in gen7_gpgpu_fillfunc()
141 interface_descriptor = gen7_fill_interface_descriptor(batch, dst, in gen7_gpgpu_fillfunc()
144 igt_assert(batch->ptr < &batch->buffer[4095]); in gen7_gpgpu_fillfunc()
[all …]
rendercopy_gen9.c
31 static void dump_batch(struct intel_batchbuffer *batch) { in dump_batch() argument
32 int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666); in dump_batch()
33 if (fd != -1) { in dump_batch()
34 igt_assert_eq(write(fd, batch->buffer, 4096), 4096); in dump_batch()
60 /* Write all -1 */
91 /* Write all -1 */
114 /* ctx->annotations is an array keeping a list of annotations of the in annotation_init()
115 * batch buffer ordered by offset. ctx->annotations[0] is thus left in annotation_init()
117 * the batch buffer with annotations_add_batch() */ in annotation_init()
118 ctx->index = 1; in annotation_init()
[all …]
/external/rust/crates/grpcio-sys/grpc/src/core/lib/transport/
metadata_batch.cc
9 * http://www.apache.org/licenses/LICENSE-2.0
37 GPR_ASSERT((list->head == nullptr) == (list->tail == nullptr)); in assert_valid_list()
38 if (!list->head) return; in assert_valid_list()
39 GPR_ASSERT(list->head->prev == nullptr); in assert_valid_list()
40 GPR_ASSERT(list->tail->next == nullptr); in assert_valid_list()
41 GPR_ASSERT((list->head == list->tail) == (list->head->next == nullptr)); in assert_valid_list()
44 for (l = list->head; l; l = l->next) { in assert_valid_list()
45 GPR_ASSERT(!GRPC_MDISNULL(l->md)); in assert_valid_list()
46 GPR_ASSERT((l->prev == nullptr) == (l == list->head)); in assert_valid_list()
47 GPR_ASSERT((l->next == nullptr) == (l == list->tail)); in assert_valid_list()
[all …]
/external/mesa3d/src/gallium/winsys/i915/drm/
i915_drm_batchbuffer.c
5 #include "drm-uapi/i915_drm.h"
30 i915_drm_batchbuffer(struct i915_winsys_batchbuffer *batch) in i915_drm_batchbuffer() argument
32 return (struct i915_drm_batchbuffer *)batch; in i915_drm_batchbuffer()
36 i915_drm_batchbuffer_reset(struct i915_drm_batchbuffer *batch) in i915_drm_batchbuffer_reset() argument
38 struct i915_drm_winsys *idws = i915_drm_winsys(batch->base.iws); in i915_drm_batchbuffer_reset()
40 if (batch->bo) in i915_drm_batchbuffer_reset()
41 drm_intel_bo_unreference(batch->bo); in i915_drm_batchbuffer_reset()
42 batch->bo = drm_intel_bo_alloc(idws->gem_manager, in i915_drm_batchbuffer_reset()
44 batch->actual_size, in i915_drm_batchbuffer_reset()
47 memset(batch->base.map, 0, batch->actual_size); in i915_drm_batchbuffer_reset()
[all …]
/external/tensorflow/tensorflow/core/kernels/batching_util/
batch_scheduler_test.cc
7 http://www.apache.org/licenses/LICENSE-2.0
41 Batch<FakeTask> batch; in TEST() local
43 EXPECT_EQ(0, batch.num_tasks()); in TEST()
44 EXPECT_TRUE(batch.empty()); in TEST()
45 EXPECT_EQ(0, batch.size()); in TEST()
46 EXPECT_FALSE(batch.IsClosed()); in TEST()
49 batch.AddTask(std::unique_ptr<FakeTask>(task0)); in TEST()
51 EXPECT_EQ(1, batch.num_tasks()); in TEST()
52 EXPECT_FALSE(batch.empty()); in TEST()
53 EXPECT_EQ(task0->size(), batch.size()); in TEST()
[all …]
/external/mesa3d/src/gallium/drivers/freedreno/a6xx/
fd6_gmem.c
61 OUT_RELOC(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0, 0); in fd6_emit_flag_reference()
63 A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(fdl_ubwc_pitch(&rsc->layout, level)) | in fd6_emit_flag_reference()
64 A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->layout.ubwc_layer_size >> 2)); in fd6_emit_flag_reference()
82 for (i = 0; i < pfb->nr_cbufs; i++) { in emit_mrt()
92 if (!pfb->cbufs[i]) in emit_mrt()
97 struct pipe_surface *psurf = pfb->cbufs[i]; in emit_mrt()
98 enum pipe_format pformat = psurf->format; in emit_mrt()
99 rsc = fd_resource(psurf->texture); in emit_mrt()
100 if (!rsc->bo) in emit_mrt()
103 uint32_t base = gmem ? gmem->cbuf_base[i] : 0; in emit_mrt()
[all …]
/external/igt-gpu-tools/tools/null_state_gen/
intel_batchbuffer.c
23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
43 assert(a->num_items < MAX_ITEMS); in bb_area_emit()
44 item = &a->item[a->num_items]; in bb_area_emit()
46 item->data = dword; in bb_area_emit()
47 item->type = type; in bb_area_emit()
48 strncpy(item->str, str, MAX_STRLEN); in bb_area_emit()
49 item->str[MAX_STRLEN - 1] = 0; in bb_area_emit()
51 a->num_items++; in bb_area_emit()
59 assert(a->num_items < MAX_ITEMS); in bb_area_emit_offset()
60 assert(i < a->num_items); in bb_area_emit_offset()
[all …]
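
bb_area_emit() above follows strncpy() with an explicit item->str[MAX_STRLEN - 1] = 0 because strncpy() does not write a terminator when the source fills the buffer. A small demonstration:

#include <stdio.h>
#include <string.h>

#define MAX_STRLEN 8

int main(void)
{
   char str[MAX_STRLEN];

   strncpy(str, "a very long annotation", MAX_STRLEN);
   str[MAX_STRLEN - 1] = 0;  /* without this, str has no terminator */
   printf("%s\n", str);      /* prints "a very " */
   return 0;
}
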
intel_renderstate_gen7.c
44 gen7_bind_buf_null(struct intel_batchbuffer *batch) in gen7_bind_buf_null() argument
46 return intel_batch_state_alloc(batch, 32, 32, "bind buf null"); in gen7_bind_buf_null()
50 gen7_emit_vertex_elements(struct intel_batchbuffer *batch) in gen7_emit_vertex_elements() argument
53 ((2 * (1 + 2)) + 1 - 2)); in gen7_emit_vertex_elements()
85 gen7_create_vertex_buffer(struct intel_batchbuffer *batch) in gen7_create_vertex_buffer() argument
89 return intel_batch_state_alloc(batch, 12*sizeof(*v), 8, "vertex buffer"); in gen7_create_vertex_buffer()
92 static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch) in gen7_emit_vertex_buffer() argument
96 offset = gen7_create_vertex_buffer(batch); in gen7_emit_vertex_buffer()
98 OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | (5 - 2)); in gen7_emit_vertex_buffer()
105 OUT_RELOC_STATE(batch, I915_GEM_DOMAIN_VERTEX, 0, offset); in gen7_emit_vertex_buffer()
[all …]
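
The OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | (5 - 2)) pattern above reflects the i915 command-length convention: the DWord Length field of a packet holds its total length in dwords minus two, so spelling the constant as (total - 2) keeps the true packet size readable. As a sketch:

#include <stdint.h>

static uint32_t
cmd_header(uint32_t opcode, unsigned total_dwords)
{
   return opcode | (total_dwords - 2);  /* length field is biased by 2 */
}

/* e.g. cmd_header(GEN4_3DSTATE_VERTEX_BUFFERS, 5) produces the header above */
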
/external/mesa3d/src/gallium/drivers/zink/
zink_batch.c
17 reset_batch(struct zink_context *ctx, struct zink_batch *batch) in reset_batch() argument
19 struct zink_screen *screen = zink_screen(ctx->base.screen); in reset_batch()
20 batch->descs_left = ZINK_BATCH_DESC_SIZE; in reset_batch()
23 if (!batch->fence) in reset_batch()
26 zink_fence_finish(screen, batch->fence, PIPE_TIMEOUT_INFINITE); in reset_batch()
27 zink_fence_reference(screen, &batch->fence, NULL); in reset_batch()
29 zink_render_pass_reference(screen, &batch->rp, NULL); in reset_batch()
30 zink_framebuffer_reference(screen, &batch->fb, NULL); in reset_batch()
31 set_foreach(batch->programs, entry) { in reset_batch()
32 struct zink_gfx_program *prog = (struct zink_gfx_program*)entry->key; in reset_batch()
[all …]
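
reset_batch() above blocks on the batch's fence before dropping references: buffers and programs must not be recycled while the GPU may still be using them. The ordering, as a sketch with hypothetical helpers:

#include <stddef.h>

struct fence;
struct batch { struct fence *fence; };

extern void fence_finish(struct fence *f);       /* blocks until signaled */
extern void release_resources(struct batch *b);  /* drops refs (hypothetical) */

static void
reset_when_idle(struct batch *b)
{
   if (b->fence) {
      fence_finish(b->fence);  /* GPU is done with this batch's objects */
      b->fence = NULL;
   }
   release_resources(b);       /* now safe to drop the references */
}
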
/external/grpc-grpc/src/core/lib/transport/
metadata_batch.cc
9 * http://www.apache.org/licenses/LICENSE-2.0
37 GPR_ASSERT((list->head == nullptr) == (list->tail == nullptr)); in assert_valid_list()
38 if (!list->head) return; in assert_valid_list()
39 GPR_ASSERT(list->head->prev == nullptr); in assert_valid_list()
40 GPR_ASSERT(list->tail->next == nullptr); in assert_valid_list()
41 GPR_ASSERT((list->head == list->tail) == (list->head->next == nullptr)); in assert_valid_list()
44 for (l = list->head; l; l = l->next) { in assert_valid_list()
45 GPR_ASSERT(!GRPC_MDISNULL(l->md)); in assert_valid_list()
46 GPR_ASSERT((l->prev == nullptr) == (l == list->head)); in assert_valid_list()
47 GPR_ASSERT((l->next == nullptr) == (l == list->tail)); in assert_valid_list()
[all …]
