Lines matching full:job in drivers/gpu/drm/v3d/v3d_sched.c
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
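These two comment fragments come from the scheduler overview at the top of the file: the shared DRM GPU scheduler round-robins between per-client entities, and v3d hands the hardware only one job at a time to keep latency low for interactive clients while bulk jobs are queued. That one-at-a-time policy is configured outside these match lines; a minimal sketch of the init call, assuming the drm_sched_init() signature and values from v5.4-era v3d_sched.c, looks like:

	/* Sketch of v3d_sched_init() (abridged, one queue shown):
	 * hw_jobs_limit = 1 means the scheduler keeps at most one job
	 * in flight, so run_job() is only invoked again after the
	 * previous job's fence signals.
	 */
	int hw_jobs_limit = 1;
	int job_hang_limit = 0;
	int hang_limit_ms = 500;

	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
			     &v3d_bin_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_bin");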
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_free() local
63 v3d_job_put(job); in v3d_job_free()
67 * Returns the fences that the job depends on, one by one.
76 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_dependency() local
82 if (!xa_empty(&job->deps)) in v3d_job_dependency()
83 return xa_erase(&job->deps, job->last_dep++); in v3d_job_dependency()
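v3d_job_dependency() drains job->deps one fence per call; the XArray is filled at submit time. A sketch of the producer side, assuming the v3d_job_init()/drm_gem_fence_array_add() helpers present in kernels of this vintage (the helper xa_alloc()s each fence at the next free index, which is why the sequential xa_erase() above walks them in order):

	/* Submit-time sketch (cf. v3d_job_init() in v3d_gem.c): stash the
	 * in-fence in the job's dependency XArray for the scheduler to
	 * wait on before run_job() is called.
	 */
	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

	ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_gem_fence_array_add(&job->deps, in_fence);
	if (ret)
		goto fail;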
90 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_run() local
91 struct v3d_dev *v3d = job->base.v3d; in v3d_bin_job_run()
96 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_bin_job_run()
103 v3d->bin_job = job; in v3d_bin_job_run()
105 * reuse the overflow attached to a previous job. in v3d_bin_job_run()
116 if (job->base.irq_fence) in v3d_bin_job_run()
117 dma_fence_put(job->base.irq_fence); in v3d_bin_job_run()
118 job->base.irq_fence = dma_fence_get(fence); in v3d_bin_job_run()
121 job->start, job->end); in v3d_bin_job_run()
124 * Writing the end register is what starts the job. in v3d_bin_job_run()
126 if (job->qma) { in v3d_bin_job_run()
127 V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma); in v3d_bin_job_run()
128 V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms); in v3d_bin_job_run()
130 if (job->qts) { in v3d_bin_job_run()
133 job->qts); in v3d_bin_job_run()
135 V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start); in v3d_bin_job_run()
136 V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end); in v3d_bin_job_run()
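The put-then-get dance on base.irq_fence above recurs verbatim in the render, TFU and CSD run functions below. A hypothetical helper (not in the driver) makes the reference ownership explicit:

/* Hypothetical helper: drop the reference held on the previous job's
 * interrupt fence, then take one on the new fence so the IRQ handler
 * can signal it once the hardware finishes.
 */
static void v3d_job_attach_irq_fence(struct v3d_job *job,
				     struct dma_fence *fence)
{
	if (job->irq_fence)
		dma_fence_put(job->irq_fence);
	job->irq_fence = dma_fence_get(fence);
}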
143 struct v3d_render_job *job = to_render_job(sched_job); in v3d_render_job_run() local
144 struct v3d_dev *v3d = job->base.v3d; in v3d_render_job_run()
148 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_render_job_run()
151 v3d->render_job = job; in v3d_render_job_run()
165 if (job->base.irq_fence) in v3d_render_job_run()
166 dma_fence_put(job->base.irq_fence); in v3d_render_job_run()
167 job->base.irq_fence = dma_fence_get(fence); in v3d_render_job_run()
170 job->start, job->end); in v3d_render_job_run()
175 * Writing the end register is what starts the job. in v3d_render_job_run()
177 V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start); in v3d_render_job_run()
178 V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end); in v3d_render_job_run()
186 struct v3d_tfu_job *job = to_tfu_job(sched_job); in v3d_tfu_job_run() local
187 struct v3d_dev *v3d = job->base.v3d; in v3d_tfu_job_run()
195 v3d->tfu_job = job; in v3d_tfu_job_run()
196 if (job->base.irq_fence) in v3d_tfu_job_run()
197 dma_fence_put(job->base.irq_fence); in v3d_tfu_job_run()
198 job->base.irq_fence = dma_fence_get(fence); in v3d_tfu_job_run()
202 V3D_WRITE(V3D_TFU_IIA, job->args.iia); in v3d_tfu_job_run()
203 V3D_WRITE(V3D_TFU_IIS, job->args.iis); in v3d_tfu_job_run()
204 V3D_WRITE(V3D_TFU_ICA, job->args.ica); in v3d_tfu_job_run()
205 V3D_WRITE(V3D_TFU_IUA, job->args.iua); in v3d_tfu_job_run()
206 V3D_WRITE(V3D_TFU_IOA, job->args.ioa); in v3d_tfu_job_run()
207 V3D_WRITE(V3D_TFU_IOS, job->args.ios); in v3d_tfu_job_run()
208 V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]); in v3d_tfu_job_run()
209 if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) { in v3d_tfu_job_run()
210 V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]); in v3d_tfu_job_run()
211 V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]); in v3d_tfu_job_run()
212 V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]); in v3d_tfu_job_run()
214 /* ICFG kicks off the job. */ in v3d_tfu_job_run()
215 V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC); in v3d_tfu_job_run()
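V3D_TFU_ICFG_IOC requests an interrupt on completion, which is how the fence stashed in irq_fence gets signaled. A trimmed sketch of the hub interrupt handler, assuming v3d_irq.c of the same era:

	/* Sketch from v3d_hub_irq(): on a TFU-complete interrupt, signal
	 * the fence the run function attached and clear the active job.
	 */
	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->base.irq_fence);

		v3d->tfu_job = NULL;
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}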
223 struct v3d_csd_job *job = to_csd_job(sched_job); in v3d_csd_job_run() local
224 struct v3d_dev *v3d = job->base.v3d; in v3d_csd_job_run()
229 v3d->csd_job = job; in v3d_csd_job_run()
237 if (job->base.irq_fence) in v3d_csd_job_run()
238 dma_fence_put(job->base.irq_fence); in v3d_csd_job_run()
239 job->base.irq_fence = dma_fence_get(fence); in v3d_csd_job_run()
244 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); in v3d_csd_job_run()
245 /* CFG0 write kicks off the job. */ in v3d_csd_job_run()
246 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]); in v3d_csd_job_run()
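The loop above deliberately starts at CFG1: queuing CFG0 is the dispatch trigger, so it must be written last. Restoring the non-matching lines around this sequence, as a sketch from same-era sources:

	/* Abridged v3d_csd_job_run(): invalidate caches and create the
	 * completion fence before touching the queue registers, then
	 * write CFG1..CFG6 and finally CFG0, which starts the dispatch.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_CSD);
	if (IS_ERR(fence))
		return NULL;
	...
	for (i = 1; i <= 6; i++)
		V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
	/* CFG0 write kicks off the job. */
	V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);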
254 struct v3d_job *job = to_v3d_job(sched_job); in v3d_cache_clean_job_run() local
255 struct v3d_dev *v3d = job->v3d; in v3d_cache_clean_job_run()
299 struct v3d_job *job = to_v3d_job(sched_job); in v3d_cl_job_timedout() local
300 struct v3d_dev *v3d = job->v3d; in v3d_cl_job_timedout()
316 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_timedout() local
319 &job->timedout_ctca, &job->timedout_ctra); in v3d_bin_job_timedout()
325 struct v3d_render_job *job = to_render_job(sched_job); in v3d_render_job_timedout() local
328 &job->timedout_ctca, &job->timedout_ctra); in v3d_render_job_timedout()
334 struct v3d_job *job = to_v3d_job(sched_job); in v3d_generic_job_timedout() local
336 v3d_gpu_reset_for_timeout(job->v3d, sched_job); in v3d_generic_job_timedout()
342 struct v3d_csd_job *job = to_csd_job(sched_job); in v3d_csd_job_timedout() local
343 struct v3d_dev *v3d = job->base.v3d; in v3d_csd_job_timedout()
349 if (job->timedout_batches != batches) { in v3d_csd_job_timedout()
350 job->timedout_batches = batches; in v3d_csd_job_timedout()
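The listing cuts off before this branch resolves; the surrounding logic, sketched from same-era sources, skips the GPU reset whenever the batch counter has advanced, so a slow but still-progressing compute job merely rearms the timeout:

	/* Sketch of the rest of v3d_csd_job_timedout(): CURRENT_CFG4
	 * counts completed batches; if it moved since the last timeout,
	 * record it and let the timer rearm instead of resetting.
	 */
	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);

	if (job->timedout_batches != batches) {
		job->timedout_batches = batches;
		return;
	}

	v3d_gpu_reset_for_timeout(v3d, sched_job);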