Searched refs:submit (Results 1 – 25 of 171) sorted by relevance

/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/
etnaviv_gem_submit.c
34 struct etnaviv_gem_submit *submit; in submit_create() local
35 size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit)); in submit_create()
37 submit = kzalloc(sz, GFP_KERNEL); in submit_create()
38 if (!submit) in submit_create()
41 submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request), in submit_create()
43 if (!submit->pmrs) { in submit_create()
44 kfree(submit); in submit_create()
47 submit->nr_pmrs = nr_pmrs; in submit_create()
49 submit->gpu = gpu; in submit_create()
50 kref_init(&submit->refcount); in submit_create()
[all …]
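
The submit_create() lines above size one allocation to hold the struct plus a trailing bos[] array, allocate the pmrs[] array separately with kcalloc(), and unwind on failure. A minimal userspace sketch of that pattern, using calloc/free in place of kzalloc/kcalloc/kfree (all type and field names here are illustrative stand-ins, not the driver's):

    #include <stdlib.h>

    struct bo  { void *obj; };
    struct pmr { unsigned counter; };

    struct submit {
        unsigned nr_pmrs;
        struct pmr *pmrs;   /* separate allocation */
        struct bo bos[];    /* trailing array, sized into the first allocation */
    };

    static struct submit *submit_create(unsigned nr_bos, unsigned nr_pmrs)
    {
        size_t sz = sizeof(struct submit) + (size_t)nr_bos * sizeof(struct bo);
        struct submit *s = calloc(1, sz);            /* kzalloc analogue */

        if (!s)
            return NULL;

        s->pmrs = calloc(nr_pmrs, sizeof(*s->pmrs)); /* kcalloc analogue */
        if (!s->pmrs) {
            free(s);         /* unwind the first allocation on failure */
            return NULL;
        }
        s->nr_pmrs = nr_pmrs;
        return s;
    }
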
etnaviv_sched.c
24 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_dependency() local
28 if (unlikely(submit->in_fence)) { in etnaviv_sched_dependency()
29 fence = submit->in_fence; in etnaviv_sched_dependency()
30 submit->in_fence = NULL; in etnaviv_sched_dependency()
38 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_sched_dependency()
39 struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; in etnaviv_sched_dependency()
74 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_run_job() local
78 fence = etnaviv_gpu_submit(submit); in etnaviv_sched_run_job()
80 dev_dbg(submit->gpu->dev, "skipping bad job\n"); in etnaviv_sched_run_job()
87 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_timedout_job() local
[all …]
etnaviv_dump.c
113 void etnaviv_core_dump(struct etnaviv_gem_submit *submit) in etnaviv_core_dump() argument
115 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_core_dump()
137 mmu_size + gpu->buffer.size + submit->cmdbuf.size; in etnaviv_core_dump()
140 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_core_dump()
141 obj = submit->bos[i].obj; in etnaviv_core_dump()
179 submit->cmdbuf.vaddr, submit->cmdbuf.size, in etnaviv_core_dump()
180 etnaviv_cmdbuf_get_va(&submit->cmdbuf, in etnaviv_core_dump()
196 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_core_dump()
201 obj = submit->bos[i].obj; in etnaviv_core_dump()
202 vram = submit->bos[i].mapping; in etnaviv_core_dump()
etnaviv_gpu.c
1262 const struct etnaviv_gem_submit *submit = event->submit; in sync_point_perfmon_sample() local
1265 for (i = 0; i < submit->nr_pmrs; i++) { in sync_point_perfmon_sample()
1266 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; in sync_point_perfmon_sample()
1269 etnaviv_perfmon_process(gpu, pmr, submit->exec_state); in sync_point_perfmon_sample()
1294 const struct etnaviv_gem_submit *submit = event->submit; in sync_point_perfmon_sample_post() local
1300 for (i = 0; i < submit->nr_pmrs; i++) { in sync_point_perfmon_sample_post()
1301 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; in sync_point_perfmon_sample_post()
1319 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) in etnaviv_gpu_submit() argument
1321 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_gpu_submit()
1326 if (!submit->runtime_resumed) { in etnaviv_gpu_submit()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_gem_submit.c
34 struct msm_gem_submit *submit; in submit_create() local
35 uint64_t sz = struct_size(submit, bos, nr_bos) + in submit_create()
36 ((u64)nr_cmds * sizeof(submit->cmd[0])); in submit_create()
41 submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); in submit_create()
42 if (!submit) in submit_create()
45 submit->dev = dev; in submit_create()
46 submit->aspace = queue->ctx->aspace; in submit_create()
47 submit->gpu = gpu; in submit_create()
48 submit->fence = NULL; in submit_create()
49 submit->cmd = (void *)&submit->bos[nr_bos]; in submit_create()
[all …]
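
msm's submit_create() instead packs two variable-length arrays into a single allocation: struct_size() (from include/linux/overflow.h) computes the struct-plus-bos[] size with overflow checking, the cmd[] bytes are added on top, and submit->cmd is pointed just past bos[nr_bos], as line 49 above shows. A userspace sketch of that single-block layout (names are illustrative; the kernel's struct_size() also guards the size math against overflow):

    #include <stdlib.h>

    struct bo  { void *obj; };
    struct cmd { unsigned type; };

    struct submit {
        unsigned nr_bos, nr_cmds;
        struct cmd *cmd;    /* points into the same block, after bos[] */
        struct bo bos[];
    };

    static struct submit *submit_create(unsigned nr_bos, unsigned nr_cmds)
    {
        size_t sz = sizeof(struct submit)
                  + (size_t)nr_bos  * sizeof(struct bo)
                  + (size_t)nr_cmds * sizeof(struct cmd);
        struct submit *s = malloc(sz);

        if (!s)
            return NULL;
        s->nr_bos  = nr_bos;
        s->nr_cmds = nr_cmds;
        s->cmd = (struct cmd *)&s->bos[nr_bos]; /* cmd[] lives right after bos[] */
        return s;
    }
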
msm_gpu.c
344 struct msm_gem_submit *submit, char *comm, char *cmd) in msm_gpu_crashstate_capture() argument
364 if (submit) { in msm_gpu_crashstate_capture()
368 for (i = 0; i < submit->nr_bos; i++) in msm_gpu_crashstate_capture()
369 if (should_dump(submit, i)) in msm_gpu_crashstate_capture()
372 for (i = 0; i < submit->nr_cmds; i++) in msm_gpu_crashstate_capture()
373 if (!should_dump(submit, submit->cmd[i].idx)) in msm_gpu_crashstate_capture()
379 for (i = 0; state->bos && i < submit->nr_bos; i++) { in msm_gpu_crashstate_capture()
380 if (should_dump(submit, i)) { in msm_gpu_crashstate_capture()
381 msm_gpu_crashstate_get_bo(state, submit->bos[i].obj, in msm_gpu_crashstate_capture()
382 submit->bos[i].iova, submit->bos[i].flags); in msm_gpu_crashstate_capture()
[all …]
msm_rd.c
86 struct msm_gem_submit *submit; member
300 struct msm_gem_submit *submit, int idx, in snapshot_buf() argument
303 struct msm_gem_object *obj = submit->bos[idx].obj; in snapshot_buf()
308 offset = iova - submit->bos[idx].iova; in snapshot_buf()
310 iova = submit->bos[idx].iova; in snapshot_buf()
325 if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) in snapshot_buf()
340 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, in msm_rd_dump_submit() argument
343 struct drm_device *dev = submit->dev; in msm_rd_dump_submit()
367 task = pid_task(submit->pid, PIDTYPE_PID); in msm_rd_dump_submit()
371 pid_nr(submit->pid), submit->seqno); in msm_rd_dump_submit()
[all …]
msm_gpu_trace.h
34 TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
35 TP_ARGS(submit, ticks),
44 __entry->pid = pid_nr(submit->pid);
45 __entry->id = submit->ident;
46 __entry->ringid = submit->ring->id;
47 __entry->seqno = submit->seqno;
57 TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
59 TP_ARGS(submit, elapsed, clock, start, end),
71 __entry->pid = pid_nr(submit->pid);
72 __entry->id = submit->ident;
[all …]
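
The TP_PROTO/TP_ARGS lines above are fragments of TRACE_EVENT() definitions. A kernel-style sketch of the full shape such a tracepoint takes, with the fields reduced to the ones visible above (the event name is made up for illustration; a real definition lives in a trace header with the usual TRACE_SYSTEM boilerplate):

    TRACE_EVENT(sketch_gpu_submit,
        TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
        TP_ARGS(submit, ticks),
        TP_STRUCT__entry(
            __field(pid_t, pid)
            __field(u32,   seqno)
            __field(u64,   ticks)
        ),
        TP_fast_assign(
            __entry->pid   = pid_nr(submit->pid);
            __entry->seqno = submit->seqno;
            __entry->ticks = ticks;
        ),
        TP_printk("pid=%d seqno=%u ticks=%llu",
                  __entry->pid, __entry->seqno, __entry->ticks)
    );
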
/kernel/linux/linux-5.10/crypto/async_tx/
async_raid6_recov.c
20 size_t len, struct async_submit_ctl *submit) in async_sum_product() argument
22 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_sum_product()
39 if (submit->flags & ASYNC_TX_FENCE) in async_sum_product()
58 async_tx_submit(chan, tx, submit); in async_sum_product()
70 async_tx_quiesce(&submit->depend_tx); in async_sum_product()
89 struct async_submit_ctl *submit) in async_mult() argument
91 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_mult()
107 if (submit->flags & ASYNC_TX_FENCE) in async_mult()
128 async_tx_submit(chan, tx, submit); in async_mult()
141 async_tx_quiesce(&submit->depend_tx); in async_mult()
[all …]
async_xor.c
24 struct async_submit_ctl *submit) in do_async_xor() argument
28 dma_async_tx_callback cb_fn_orig = submit->cb_fn; in do_async_xor()
29 void *cb_param_orig = submit->cb_param; in do_async_xor()
30 enum async_tx_flags flags_orig = submit->flags; in do_async_xor()
40 submit->flags = flags_orig; in do_async_xor()
46 submit->flags &= ~ASYNC_TX_ACK; in do_async_xor()
47 submit->flags |= ASYNC_TX_FENCE; in do_async_xor()
48 submit->cb_fn = NULL; in do_async_xor()
49 submit->cb_param = NULL; in do_async_xor()
51 submit->cb_fn = cb_fn_orig; in do_async_xor()
[all …]
async_pq.c
39 struct async_submit_ctl *submit) in do_async_gen_syndrome() argument
43 enum async_tx_flags flags_orig = submit->flags; in do_async_gen_syndrome()
44 dma_async_tx_callback cb_fn_orig = submit->cb_fn; in do_async_gen_syndrome()
45 dma_async_tx_callback cb_param_orig = submit->cb_param; in do_async_gen_syndrome()
52 submit->flags = flags_orig; in do_async_gen_syndrome()
59 submit->flags &= ~ASYNC_TX_ACK; in do_async_gen_syndrome()
60 submit->flags |= ASYNC_TX_FENCE; in do_async_gen_syndrome()
61 submit->cb_fn = NULL; in do_async_gen_syndrome()
62 submit->cb_param = NULL; in do_async_gen_syndrome()
64 submit->cb_fn = cb_fn_orig; in do_async_gen_syndrome()
[all …]
raid6test.c
60 struct async_submit_ctl submit; in raid6_dual_recov() local
71 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
73 disks, bytes, &submit); in raid6_dual_recov()
91 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, in raid6_dual_recov()
93 tx = async_xor(dest, blocks, 0, count, bytes, &submit); in raid6_dual_recov()
95 init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); in raid6_dual_recov()
97 disks, bytes, &submit); in raid6_dual_recov()
102 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
104 faila, ptrs, offs, &submit); in raid6_dual_recov()
107 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
[all …]
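
raid6test chains operations by threading the descriptor returned from one call into the next init_async_submit() as depend_tx; both call shapes are visible verbatim in the lines above. A kernel-style sketch of that chaining, ending with a dependent completion callback via async_trigger_callback() (the function and parameter names here are illustrative, not from the test):

    #include <linux/async_tx.h>

    static void xor_then_notify(struct page *dest, struct page **blocks,
                                int count, size_t bytes, addr_conv_t *addr_conv,
                                dma_async_tx_callback done, void *done_arg)
    {
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;

        /* First op: zero dest, then XOR count source pages into it. */
        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
                          addr_conv);
        tx = async_xor(dest, blocks, 0, count, bytes, &submit);

        /* Chain: passing tx as depend_tx orders the callback after the
         * XOR, whether it ran on a DMA engine or on the sync fallback. */
        init_async_submit(&submit, ASYNC_TX_ACK, tx, done, done_arg, addr_conv);
        async_trigger_callback(&submit);
    }
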
async_tx.c
43 __async_tx_find_channel(struct async_submit_ctl *submit, in __async_tx_find_channel() argument
46 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in __async_tx_find_channel()
144 struct async_submit_ctl *submit) in async_tx_submit() argument
146 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in async_tx_submit()
148 tx->callback = submit->cb_fn; in async_tx_submit()
149 tx->callback_param = submit->cb_param; in async_tx_submit()
204 if (submit->flags & ASYNC_TX_ACK) in async_tx_submit()
221 async_trigger_callback(struct async_submit_ctl *submit) in async_trigger_callback() argument
226 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in async_trigger_callback()
245 async_tx_submit(chan, tx, submit); in async_trigger_callback()
[all …]
async_memcpy.c
34 struct async_submit_ctl *submit) in async_memcpy() argument
36 struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, in async_memcpy()
48 if (submit->cb_fn) in async_memcpy()
50 if (submit->flags & ASYNC_TX_FENCE) in async_memcpy()
70 async_tx_submit(chan, tx, submit); in async_memcpy()
76 async_tx_quiesce(&submit->depend_tx); in async_memcpy()
86 async_tx_sync_epilog(submit); in async_memcpy()
/kernel/linux/linux-5.10/include/linux/
async_tx.h
103 __async_tx_find_channel(struct async_submit_ctl *submit,
118 async_tx_find_channel(struct async_submit_ctl *submit, in async_tx_find_channel() argument
133 async_tx_sync_epilog(struct async_submit_ctl *submit) in async_tx_sync_epilog() argument
135 if (submit->cb_fn) in async_tx_sync_epilog()
136 submit->cb_fn(submit->cb_param); in async_tx_sync_epilog()
159 struct async_submit_ctl *submit);
163 int src_cnt, size_t len, struct async_submit_ctl *submit);
168 int src_cnt, size_t len, struct async_submit_ctl *submit);
173 struct async_submit_ctl *submit);
179 struct async_submit_ctl *submit);
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/lima/
lima_gem.c
273 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) in lima_gem_add_deps() argument
277 for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) { in lima_gem_add_deps()
280 if (!submit->in_sync[i]) in lima_gem_add_deps()
283 err = drm_syncobj_find_fence(file, submit->in_sync[i], in lima_gem_add_deps()
288 err = drm_gem_fence_array_add(&submit->task->deps, fence); in lima_gem_add_deps()
298 int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) in lima_gem_submit() argument
306 struct lima_bo **bos = submit->lbos; in lima_gem_submit()
308 if (submit->out_sync) { in lima_gem_submit()
309 out_sync = drm_syncobj_find(file, submit->out_sync); in lima_gem_submit()
314 for (i = 0; i < submit->nr_bos; i++) { in lima_gem_submit()
[all …]
lima_drv.c
110 struct lima_submit submit = {0}; in lima_ioctl_gem_submit() local
124 bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL); in lima_ioctl_gem_submit()
128 size = args->nr_bos * sizeof(*submit.bos); in lima_ioctl_gem_submit()
156 submit.pipe = args->pipe; in lima_ioctl_gem_submit()
157 submit.bos = bos; in lima_ioctl_gem_submit()
158 submit.lbos = (void *)bos + size; in lima_ioctl_gem_submit()
159 submit.nr_bos = args->nr_bos; in lima_ioctl_gem_submit()
160 submit.task = task; in lima_ioctl_gem_submit()
161 submit.ctx = ctx; in lima_ioctl_gem_submit()
162 submit.flags = args->flags; in lima_ioctl_gem_submit()
[all …]
/kernel/linux/linux-5.10/drivers/dma/ti/
cppi41.c
119 u16 submit; member
157 [ 0] = { .submit = 32, .complete = 93},
158 [ 1] = { .submit = 34, .complete = 94},
159 [ 2] = { .submit = 36, .complete = 95},
160 [ 3] = { .submit = 38, .complete = 96},
161 [ 4] = { .submit = 40, .complete = 97},
162 [ 5] = { .submit = 42, .complete = 98},
163 [ 6] = { .submit = 44, .complete = 99},
164 [ 7] = { .submit = 46, .complete = 100},
165 [ 8] = { .submit = 48, .complete = 101},
[all …]
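
The cppi41 entries above form a fixed map from DMA channel index to a hardware submit queue and completion queue, built with designated array initializers. A trivial, runnable userspace rendering of the same lookup-table idiom (queue numbers copied from the two entries shown; the rest elided):

    #include <stdint.h>
    #include <stdio.h>

    struct chan_queues { uint16_t submit, complete; };

    static const struct chan_queues usb_queues[] = {
        [0] = { .submit = 32, .complete = 93 },
        [1] = { .submit = 34, .complete = 94 },
        /* ... one entry per channel, as in the driver ... */
    };

    int main(void)
    {
        unsigned ch = 1;
        printf("channel %u -> submit q %u, complete q %u\n",
               ch, usb_queues[ch].submit, usb_queues[ch].complete);
        return 0;
    }
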
/kernel/linux/linux-5.10/fs/iomap/
direct-io.c
43 } submit; member
70 dio->submit.last_queue = bdev_get_queue(iomap->bdev); in iomap_dio_submit_bio()
72 dio->submit.cookie = dio->dops->submit_io( in iomap_dio_submit_bio()
76 dio->submit.cookie = submit_bio(bio); in iomap_dio_submit_bio()
163 struct task_struct *waiter = dio->submit.waiter; in iomap_dio_bio_end_io()
164 WRITE_ONCE(dio->submit.waiter, NULL); in iomap_dio_bio_end_io()
210 unsigned int align = iov_iter_alignment(dio->submit.iter); in iomap_dio_bio_actor()
250 orig_count = iov_iter_count(dio->submit.iter); in iomap_dio_bio_actor()
251 iov_iter_truncate(dio->submit.iter, length); in iomap_dio_bio_actor()
253 nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES); in iomap_dio_bio_actor()
[all …]
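
iomap_dio_submit_bio() above prefers a filesystem-supplied submit_io hook and falls back to the generic submit_bio(), recording the returned cookie for later polling. A userspace sketch of that hook-or-fallback dispatch (all types here are simplified stand-ins for the kernel's):

    typedef long cookie_t;

    struct dio_ops { cookie_t (*submit_io)(void *bio); };

    struct dio {
        const struct dio_ops *dops;
        struct { cookie_t cookie; } submit;
    };

    /* Stand-in for the block layer's generic submit_bio(). */
    static cookie_t generic_submit_bio(void *bio) { (void)bio; return 1; }

    static void dio_submit(struct dio *dio, void *bio)
    {
        /* A filesystem-supplied hook wins; the generic path is the fallback. */
        if (dio->dops && dio->dops->submit_io)
            dio->submit.cookie = dio->dops->submit_io(bio);
        else
            dio->submit.cookie = generic_submit_bio(bio);
    }
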
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/
a2xx_gpu.c
13 static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a2xx_submit() argument
16 struct msm_ringbuffer *ring = submit->ring; in a2xx_submit()
19 for (i = 0; i < submit->nr_cmds; i++) { in a2xx_submit()
20 switch (submit->cmd[i].type) { in a2xx_submit()
26 if (priv->lastctx == submit->queue->ctx) in a2xx_submit()
31 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a2xx_submit()
32 OUT_RING(ring, submit->cmd[i].size); in a2xx_submit()
39 OUT_RING(ring, submit->seqno); in a2xx_submit()
48 OUT_RING(ring, submit->seqno); in a2xx_submit()
479 .submit = a2xx_submit,
a5xx_gpu.c
57 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit_in_rb() argument
60 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit_in_rb()
65 for (i = 0; i < submit->nr_cmds; i++) { in a5xx_submit_in_rb()
66 switch (submit->cmd[i].type) { in a5xx_submit_in_rb()
70 if (priv->lastctx == submit->queue->ctx) in a5xx_submit_in_rb()
75 obj = submit->bos[submit->cmd[i].idx].obj; in a5xx_submit_in_rb()
76 dwords = submit->cmd[i].size; in a5xx_submit_in_rb()
112 ring->memptrs->fence = submit->seqno; in a5xx_submit_in_rb()
116 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit() argument
121 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit()
[all …]
a3xx_gpu.c
31 static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a3xx_submit() argument
34 struct msm_ringbuffer *ring = submit->ring; in a3xx_submit()
37 for (i = 0; i < submit->nr_cmds; i++) { in a3xx_submit()
38 switch (submit->cmd[i].type) { in a3xx_submit()
44 if (priv->lastctx == submit->queue->ctx) in a3xx_submit()
49 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a3xx_submit()
50 OUT_RING(ring, submit->cmd[i].size); in a3xx_submit()
57 OUT_RING(ring, submit->seqno); in a3xx_submit()
74 OUT_RING(ring, submit->seqno); in a3xx_submit()
494 .submit = a3xx_submit,
a4xx_gpu.c
25 static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a4xx_submit() argument
28 struct msm_ringbuffer *ring = submit->ring; in a4xx_submit()
31 for (i = 0; i < submit->nr_cmds; i++) { in a4xx_submit()
32 switch (submit->cmd[i].type) { in a4xx_submit()
38 if (priv->lastctx == submit->queue->ctx) in a4xx_submit()
43 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a4xx_submit()
44 OUT_RING(ring, submit->cmd[i].size); in a4xx_submit()
51 OUT_RING(ring, submit->seqno); in a4xx_submit()
68 OUT_RING(ring, submit->seqno); in a4xx_submit()
629 .submit = a4xx_submit,
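
All four adreno submit paths above follow the same loop: walk submit->cmd[], skip a context-restore buffer when the last context matches, emit the buffer address and size into the ring, and finish by writing the submit's seqno as the fence value; the .submit = aNxx_submit lines plug each variant into the common funcs table. A compact userspace sketch of that loop (all names are illustrative stand-ins):

    #include <stdint.h>

    enum cmd_type { CMD_BUF, CMD_CTX_RESTORE_BUF };

    struct cmd  { enum cmd_type type; uint64_t iova; uint32_t size; };
    struct ring { uint32_t buf[1024]; unsigned wptr; };

    static void out_ring(struct ring *r, uint32_t v)
    {
        r->buf[r->wptr++ % 1024] = v;
    }

    static void gpu_submit(struct ring *ring, const struct cmd *cmds,
                           unsigned nr_cmds, uint32_t seqno, int same_ctx)
    {
        for (unsigned i = 0; i < nr_cmds; i++) {
            switch (cmds[i].type) {
            case CMD_CTX_RESTORE_BUF:
                if (same_ctx)       /* same context: restore is redundant */
                    break;
                /* fall through */
            case CMD_BUF:
                out_ring(ring, (uint32_t)cmds[i].iova); /* lower 32 bits */
                out_ring(ring, cmds[i].size);
                break;
            }
        }
        out_ring(ring, seqno);      /* fence write marks the job complete */
    }
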
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
i915_active.c
80 struct i915_sw_fence *submit; in __live_active_setup() local
89 submit = heap_fence_create(GFP_KERNEL); in __live_active_setup()
90 if (!submit) { in __live_active_setup()
108 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, in __live_active_setup()
109 submit, in __live_active_setup()
134 i915_sw_fence_commit(submit); in __live_active_setup()
135 heap_fence_put(submit); in __live_active_setup()
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/
i915_request.c
121 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
183 cb->hook(container_of(cb->fence, struct i915_request, submit), in irq_execute_cb_hook()
283 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); in i915_request_retire()
440 cb->fence = &rq->submit; in __await_execution()
675 container_of(fence, typeof(*request), submit); in submit_notify()
776 i915_sw_fence_init(&rq->submit, submit_notify); in __i915_request_ctor()
856 i915_sw_fence_reinit(&i915_request_get(rq)->submit); in __i915_request_create()
997 err = i915_sw_fence_await_dma_fence(&rq->submit, in i915_request_await_start()
1080 struct i915_sw_fence *wait = &to->submit; in emit_semaphore_wait()
1227 return i915_sw_fence_await_dma_fence(&rq->submit, fence, in __i915_request_await_external()
[all …]
