 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * @title: Batch Buffer
 * structure called batch is in scope. The basic macros are #BEGIN_BATCH,
 * Note that this library's header pulls in the [i-g-t core](igt-gpu-tools-i-g-t-core.html)
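/*
 * A minimal sketch (not part of this file) of the emission macros mentioned
 * above, assuming a struct intel_batchbuffer pointer named batch is in scope:
 * reserve two dwords with no relocations, emit two no-ops, then close and
 * submit the block.
 */
BEGIN_BATCH(2, 0);
OUT_BATCH(MI_NOOP);
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
intel_batchbuffer_flush(batch);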
 * @batch: batchbuffer object
 * Aligns the current in-batch offset to the given value.
intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align)
	uint32_t offset = batch->ptr - batch->buffer;
	batch->ptr = batch->buffer + offset;
 * @batch: batchbuffer object
 * Verifies that @size bytes are still available in @batch, so the allocation
 * cannot overflow, then allocates @size bytes within @batch.
 * Returns: Pointer to the zero-initialised subdata allocated within @batch.
intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch, uint32_t size,
	uint32_t offset = intel_batchbuffer_align(batch, align);
	igt_assert(size <= intel_batchbuffer_space(batch));
	batch->ptr += size;
	return memset(batch->buffer + offset, 0, size);
 * @batch: batchbuffer object
 * Returns: Offset of @ptr from the base of @batch.
intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr)
	return (uint8_t *)ptr - batch->buffer;
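/*
 * A sketch of how the two helpers above are typically paired: carve a zeroed,
 * 64-byte aligned region out of the batch and record its offset for use by
 * later commands. The surface_state name is made up for illustration.
 */
uint32_t *surface_state = intel_batchbuffer_subdata_alloc(batch, 64, 64);
uint32_t ss_offset = intel_batchbuffer_subdata_offset(batch, surface_state);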
 * @batch: batchbuffer object
 * Resets @batch by allocating a new gem buffer object as backing storage.
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
	if (batch->bo != NULL) {
		drm_intel_bo_unreference(batch->bo);
		batch->bo = NULL;
	batch->bo = drm_intel_bo_alloc(batch->bufmgr, "batchbuffer",
	memset(batch->buffer, 0, sizeof(batch->buffer));
	batch->ctx = NULL;
	batch->ptr = batch->buffer;
	batch->end = NULL;
	struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
	batch->bufmgr = bufmgr;
	batch->devid = devid;
	batch->gen = intel_gen(devid);
	intel_batchbuffer_reset(batch);
	return batch;
 * @batch: batchbuffer object
 * Releases all resources of the batchbuffer object @batch.
intel_batchbuffer_free(struct intel_batchbuffer *batch)
	drm_intel_bo_unreference(batch->bo);
	batch->bo = NULL;
	free(batch);
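/*
 * Typical lifecycle, sketched under the assumption that the caller owns the
 * drm fd and the libdrm buffer manager: allocate the batch once, reuse it
 * across flushes, and free it before tearing the buffer manager down.
 */
int fd = drm_open_driver(DRIVER_INTEL);
drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
struct intel_batchbuffer *batch =
	intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

/* ... emit and flush work here ... */

intel_batchbuffer_free(batch);
drm_intel_bufmgr_destroy(bufmgr);
close(fd);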
flush_on_ring_common(struct intel_batchbuffer *batch, int ring)
	unsigned int used = batch->ptr - batch->buffer;
	if (IS_GEN5(batch->devid)) {
		/* emit gen5 w/a without batch space checks - we reserve that
		*(uint32_t *) (batch->ptr) = CMD_POLY_STIPPLE_OFFSET << 16;
		batch->ptr += 4;
		*(uint32_t *) (batch->ptr) = 0;
		batch->ptr += 4;
	*(uint32_t *) (batch->ptr) = 0; /* noop */
	batch->ptr += 4;
	*(uint32_t *)(batch->ptr) = MI_BATCH_BUFFER_END; /* mark the end of the buffer */
	batch->ptr += 4;
	return batch->ptr - batch->buffer;
 * @batch: batchbuffer object
 * Submits the batch for execution on @ring.
intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring)
	unsigned int used = flush_on_ring_common(batch, ring);
	do_or_die(drm_intel_bo_subdata(batch->bo, 0, used, batch->buffer));
	batch->ptr = NULL;
	ctx = batch->ctx;
	do_or_die(drm_intel_gem_bo_context_exec(batch->bo, ctx, used, ring));
	intel_batchbuffer_reset(batch);
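/*
 * A minimal sketch of an explicit-ring submission, assuming the commands have
 * already been emitted into batch: I915_EXEC_BLT comes from i915_drm.h, and
 * the helper resets the batch afterwards so it can be reused.
 */
intel_batchbuffer_flush_on_ring(batch, I915_EXEC_BLT);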
intel_batchbuffer_set_context(struct intel_batchbuffer *batch,
	batch->ctx = context;
 * @batch: batchbuffer object
 * Submits the batch for execution on the render engine with the supplied
intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
	unsigned int used = flush_on_ring_common(batch, I915_EXEC_RENDER);
	ret = drm_intel_bo_subdata(batch->bo, 0, used, batch->buffer);
	batch->ptr = NULL;
	ret = drm_intel_gem_bo_context_exec(batch->bo, context, used,
	intel_batchbuffer_reset(batch);
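/*
 * A sketch of submitting with an explicit hardware context, assuming libdrm's
 * drm_intel_gem_context_create()/drm_intel_gem_context_destroy() helpers and
 * a bufmgr as above; the batch runs on the render ring with that context.
 */
drm_intel_context *context = drm_intel_gem_context_create(bufmgr);
igt_assert(context);

/* ... emit render commands into batch ... */
intel_batchbuffer_flush_with_context(batch, context);

drm_intel_gem_context_destroy(context);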
 * @batch: batchbuffer object
 * Submits the batch for execution on the blitter engine, selecting the right
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
	if (HAS_BLT_RING(batch->devid))
	intel_batchbuffer_flush_on_ring(batch, ring);
 * @batch: batchbuffer object
 * Emits both a libdrm relocation entry pointing at @buffer and the pre-computed
 * DWORD of @buffer's presumed gpu address plus the supplied @delta into @batch.
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
	if (batch->ptr - batch->buffer > BATCH_SZ)
		batch->ptr, batch->buffer,
		(int)(batch->ptr - batch->buffer), BATCH_SZ);
	ret = drm_intel_bo_emit_reloc_fence(batch->bo, batch->ptr - batch->buffer,
	ret = drm_intel_bo_emit_reloc(batch->bo, batch->ptr - batch->buffer,
	offset = buffer->offset64;
	intel_batchbuffer_emit_dword(batch, offset);
	if (batch->gen >= 8)
		intel_batchbuffer_emit_dword(batch, offset >> 32);
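/*
 * A sketch of emitting a relocation by hand, e.g. for a surface written by
 * the gpu; target_bo is a hypothetical drm_intel_bo, and the argument order
 * shown (buffer, delta, read domains, write domain, fenced) is an assumption
 * to be checked against intel_batchbuffer.h. On gen8+ two dwords are emitted,
 * on older gens a single one.
 */
intel_batchbuffer_emit_reloc(batch, target_bo, 0 /* delta */,
			     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
			     0 /* not fenced */);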
 * @batch: batchbuffer object
 * confirm that there is enough space in the batch for the data to be
intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
	subdata = intel_batchbuffer_subdata_alloc(batch, bytes, align);
	return intel_batchbuffer_subdata_offset(batch, subdata);
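/*
 * A sketch of copying constant data into the batch so that later commands can
 * reference it by offset; the payload array is made up for illustration and a
 * 64-byte alignment is assumed to be sufficient for the consumer.
 */
static const uint32_t payload[] = { 0x0, 0x1, 0x2, 0x3 };
uint32_t payload_offset;

payload_offset = intel_batchbuffer_copy_data(batch, payload,
					     sizeof(payload), 64);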
 * @batch: batchbuffer object
 * @src_x1: source pixel x-coordinate
 * @src_y1: source pixel y-coordinate
 * @dst_x1: destination pixel x-coordinate
 * @dst_y1: destination pixel y-coordinate
 * This emits a 2D copy operation using blitter commands into the supplied batch
intel_blt_copy(struct intel_batchbuffer *batch,
	const int gen = batch->gen;
	igt_assert(src_pitch * (src_y1 + height) <= src_bo->size);
	igt_assert(dst_pitch * (dst_y1 + height) <= dst_bo->size);
	intel_batchbuffer_flush(batch);
 * @batch: batchbuffer object
 * This emits a copy operation using blitter commands into the supplied batch
 * over to @dst_bo. Note that @size must be page-aligned.
intel_copy_bo(struct intel_batchbuffer *batch,
	intel_blt_copy(batch,
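/*
 * A sketch of a whole-buffer copy, assuming two equally sized, page-aligned
 * buffer objects from the same bufmgr; note that the destination comes before
 * the source in the argument list.
 */
drm_intel_bo *src_bo = drm_intel_bo_alloc(bufmgr, "src", 1024 * 4096, 4096);
drm_intel_bo *dst_bo = drm_intel_bo_alloc(bufmgr, "dst", 1024 * 4096, 4096);

intel_copy_bo(batch, dst_bo, src_bo, 1024 * 4096);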
 * @buf: the i-g-t buffer object
 * Computes the width in pixels of the given buffer from its stride and bpp.
	return buf->stride / (buf->bpp / 8);
 * @buf: the i-g-t buffer object
 * Computes the height in pixel rows of the given buffer from its size and stride.
	return buf->size / buf->stride;
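/*
 * Worked example for the two helpers above: a hypothetical buffer with
 * stride = 4096 bytes, bpp = 32 and size = 4096 * 1024 bytes gives
 * igt_buf_width() = 4096 / (32 / 8) = 1024 pixels and
 * igt_buf_height() = (4096 * 1024) / 4096 = 1024 rows.
 */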
	reloc->target_handle = gem_handle;
	reloc->delta = delta;
	reloc->offset = offset * sizeof(uint32_t);
	reloc->presumed_offset = 0;
	reloc->read_domains = read_domains;
	reloc->write_domain = write_domains;
	obj->handle = gem_handle;
	obj->relocation_count = count;
	obj->relocs_ptr = to_user_pointer(relocs);
	uint32_t batch[12];
	batch[i++] = dword0;
	batch[i++] = dword1 | dst_pitch;
	batch[i++] = (dst_y << 16) | dst_x; /* dst x1,y1 */
	batch[i++] = ((dst_y + height) << 16) | (dst_x + width); /* dst x2,y2 */
	batch[i++] = dst_delta; /* dst address lower bits */
	batch[i++] = 0; /* dst address upper bits */
	batch[i++] = (src_y << 16) | src_x; /* src x1,y1 */
	batch[i++] = src_pitch;
	batch[i++] = src_delta; /* src address lower bits */
	batch[i++] = 0; /* src address upper bits */
	batch[i++] = MI_BATCH_BUFFER_END;
	batch[i++] = MI_NOOP;
	igt_assert(i == ARRAY_SIZE(batch));
	gem_write(fd, batch_handle, 0, batch, sizeof(batch));
	exec_blit(fd, objs, 3, ARRAY_SIZE(batch));
 * @batch: batchbuffer object
 * @src: source i-g-t buffer object
 * @src_delta: offset into the source i-g-t bo
 * @src_x: source pixel x-coordinate
 * @src_y: source pixel y-coordinate
 * @dst: destination i-g-t buffer object
 * @dst_delta: offset into the destination i-g-t bo
 * @dst_x: destination pixel x-coordinate
 * @dst_y: destination pixel y-coordinate
void igt_blitter_fast_copy(struct intel_batchbuffer *batch,
	igt_assert(src->bpp == dst->bpp);
	src_pitch = fast_copy_pitch(src->stride, src->tiling);
	dst_pitch = fast_copy_pitch(dst->stride, dst->tiling);
	dword0 = fast_copy_dword0(src->tiling, dst->tiling);
	dword1 = fast_copy_dword1(src->tiling, dst->tiling, dst->bpp);
	OUT_RELOC(dst->bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, dst_delta);
	OUT_RELOC(src->bo, I915_GEM_DOMAIN_RENDER, 0, src_delta);
	intel_batchbuffer_flush(batch);
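/*
 * A sketch of a fast-copy blit between two already-initialised struct igt_buf
 * objects with matching bpp, copying a width x height rectangle from (0,0) to
 * (0,0) with no extra byte offset into either bo. The parameter order is
 * inferred from the documentation above; the prototype in intel_batchbuffer.h
 * is authoritative.
 */
igt_blitter_fast_copy(batch,
		      &src, 0 /* src_delta */, 0, 0,
		      width, height,
		      &dst, 0 /* dst_delta */, 0, 0);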
 * The platform-specific render copy function pointer for the device
 * The platform-specific media fill function pointer for the device specified
 * The platform-specific gpgpu fill function pointer for the device specified
 * The platform-specific media spin function pointer for the device specified
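/*
 * A sketch of how these per-platform pointers are typically consumed in a
 * test: look the function up by devid and skip when the platform has no
 * implementation. igt_get_render_copyfunc() and igt_require() come from the
 * i-g-t libraries.
 */
igt_render_copyfunc_t render_copy = igt_get_render_copyfunc(batch->devid);

igt_require(render_copy);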