
Lines matching full:job (Linux kernel V3D DRM scheduler, drivers/gpu/drm/v3d/v3d_sched.c)

10  * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
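These two fragments come from the file's header comment: the DRM GPU scheduler round-robins between clients, and to keep latency low for interactive jobs the driver submits a new job to the hardware only once the previous one has completed, rather than filling the hardware queues. The matched functions below are the per-queue scheduler callbacks. As orientation, here is a hedged sketch of how such callbacks plug into struct drm_sched_backend_ops; the ops-table name is an assumption for illustration, only the callback names appear in the matches.

        /* Sketch: wiring the matched callbacks into the DRM scheduler.
         * The table name v3d_bin_sched_ops is assumed; each queue (bin,
         * render, TFU, CSD, cache clean) would get its own table.
         */
        static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
                .run_job      = v3d_bin_job_run,
                .timedout_job = v3d_bin_job_timedout,
                .free_job     = v3d_sched_job_free,
        };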
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_sched_job_free() local
62 v3d_job_cleanup(job); in v3d_sched_job_free()
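The two matched lines are the entire body of the free callback: it converts the scheduler's generic job handle back into the driver's v3d_job and drops the job's references. A minimal reconstruction, assuming the standard .free_job prototype:

        static void
        v3d_sched_job_free(struct drm_sched_job *sched_job)
        {
                struct v3d_job *job = to_v3d_job(sched_job);

                /* Drop the job's BO and fence references. */
                v3d_job_cleanup(job);
        }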
66 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) in v3d_switch_perfmon() argument
68 if (job->perfmon != v3d->active_perfmon) in v3d_switch_perfmon()
71 if (job->perfmon && v3d->active_perfmon != job->perfmon) in v3d_switch_perfmon()
72 v3d_perfmon_start(v3d, job->perfmon); in v3d_switch_perfmon()
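v3d_switch_perfmon() swaps the active hardware performance monitor when the incoming job uses a different one than the device currently has active. The stop call for the old perfmon does not contain the token "job", so it is absent from this listing; a hedged reconstruction of the whole function, where the v3d_perfmon_stop() call and its arguments are assumptions:

        static void
        v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
        {
                /* Assumed line (not in the matches): stop the currently
                 * active perfmon if this job wants a different one.
                 */
                if (job->perfmon != v3d->active_perfmon)
                        v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

                /* Start this job's perfmon unless it is already active. */
                if (job->perfmon && v3d->active_perfmon != job->perfmon)
                        v3d_perfmon_start(v3d, job->perfmon);
        }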
77 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_run() local
78 struct v3d_dev *v3d = job->base.v3d; in v3d_bin_job_run()
83 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_bin_job_run()
90 v3d->bin_job = job; in v3d_bin_job_run()
92 * reuse the overflow attached to a previous job. in v3d_bin_job_run()
103 if (job->base.irq_fence) in v3d_bin_job_run()
104 dma_fence_put(job->base.irq_fence); in v3d_bin_job_run()
105 job->base.irq_fence = dma_fence_get(fence); in v3d_bin_job_run()
108 job->start, job->end); in v3d_bin_job_run()
110 v3d_switch_perfmon(v3d, &job->base); in v3d_bin_job_run()
113 * Writing the end register is what starts the job. in v3d_bin_job_run()
115 if (job->qma) { in v3d_bin_job_run()
116 V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma); in v3d_bin_job_run()
117 V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms); in v3d_bin_job_run()
119 if (job->qts) { in v3d_bin_job_run()
122 job->qts); in v3d_bin_job_run()
124 V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start); in v3d_bin_job_run()
125 V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end); in v3d_bin_job_run()
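The binner (CT0) run callback shows the pattern the render, TFU, and CSD queues repeat: bail out if the scheduler already marked the job's finished fence with an error, record the job on the device, create a fence to be signaled from the IRQ handler and swap it into job->base.irq_fence, switch perfmons, then program the queue; writing the end-address register (CT0QEA) is what actually starts the hardware. A condensed, hedged sketch; v3d_fence_create(), the V3D_PTB_BPOS clear, and the error paths are assumptions, while the register writes come from the matches:

        static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
        {
                struct v3d_bin_job *job = to_bin_job(sched_job);
                struct v3d_dev *v3d = job->base.v3d;
                struct dma_fence *fence;

                if (unlikely(job->base.base.s_fence->finished.error))
                        return NULL;

                v3d->bin_job = job;

                /* Clear the overflow allocation so we don't reuse the
                 * overflow attached to a previous job (assumed register).
                 */
                V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);

                fence = v3d_fence_create(v3d, V3D_BIN); /* assumed helper */
                if (IS_ERR(fence))
                        return NULL;

                if (job->base.irq_fence)
                        dma_fence_put(job->base.irq_fence);
                job->base.irq_fence = dma_fence_get(fence);

                /* trace_v3d_submit_cl(..., job->start, job->end) elided. */

                v3d_switch_perfmon(v3d, &job->base);

                /* Restore binner overflow state if the job carries any,
                 * then kick: writing the end register starts the job.
                 */
                if (job->qma) {
                        V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
                        V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
                }
                if (job->qts)
                        V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
                                       V3D_CLE_CT0QTS_ENABLE | job->qts);
                V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
                V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

                return fence;
        }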
132 struct v3d_render_job *job = to_render_job(sched_job); in v3d_render_job_run() local
133 struct v3d_dev *v3d = job->base.v3d; in v3d_render_job_run()
137 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_render_job_run()
140 v3d->render_job = job; in v3d_render_job_run()
154 if (job->base.irq_fence) in v3d_render_job_run()
155 dma_fence_put(job->base.irq_fence); in v3d_render_job_run()
156 job->base.irq_fence = dma_fence_get(fence); in v3d_render_job_run()
159 job->start, job->end); in v3d_render_job_run()
161 v3d_switch_perfmon(v3d, &job->base); in v3d_render_job_run()
166 * Writing the end register is what starts the job. in v3d_render_job_run()
168 V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start); in v3d_render_job_run()
169 V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end); in v3d_render_job_run()
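The render (CT1) run callback mirrors the binner one minus the overflow handling: the same fence-error check, irq_fence swap, tracepoint, and perfmon switch, followed by the CT1 kick. Only the tail differs, so just that is sketched, under the same assumptions as above:

        /* Tail of v3d_render_job_run(): as with the binner, writing
         * the end register (CT1QEA here) is what starts the job.
         */
        V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
        V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);

        return fence;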
177 struct v3d_tfu_job *job = to_tfu_job(sched_job); in v3d_tfu_job_run() local
178 struct v3d_dev *v3d = job->base.v3d; in v3d_tfu_job_run()
182 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_tfu_job_run()
185 v3d->tfu_job = job; in v3d_tfu_job_run()
191 if (job->base.irq_fence) in v3d_tfu_job_run()
192 dma_fence_put(job->base.irq_fence); in v3d_tfu_job_run()
193 job->base.irq_fence = dma_fence_get(fence); in v3d_tfu_job_run()
197 V3D_WRITE(V3D_TFU_IIA, job->args.iia); in v3d_tfu_job_run()
198 V3D_WRITE(V3D_TFU_IIS, job->args.iis); in v3d_tfu_job_run()
199 V3D_WRITE(V3D_TFU_ICA, job->args.ica); in v3d_tfu_job_run()
200 V3D_WRITE(V3D_TFU_IUA, job->args.iua); in v3d_tfu_job_run()
201 V3D_WRITE(V3D_TFU_IOA, job->args.ioa); in v3d_tfu_job_run()
202 V3D_WRITE(V3D_TFU_IOS, job->args.ios); in v3d_tfu_job_run()
203 V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]); in v3d_tfu_job_run()
204 if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) { in v3d_tfu_job_run()
205 V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]); in v3d_tfu_job_run()
206 V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]); in v3d_tfu_job_run()
207 V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]); in v3d_tfu_job_run()
209 /* ICFG kicks off the job. */ in v3d_tfu_job_run()
210 V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC); in v3d_tfu_job_run()
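The TFU (texture formatting unit) run callback copies the job's argument block straight into the TFU registers; the COEF1..COEF3 coefficient registers are written only when COEF0 carries the USECOEF flag, and the final ICFG write, with the interrupt-on-complete bit IOC set, is what kicks the unit. A commented sketch of that tail; the per-register meanings are inferred from the register names and are assumptions:

        /* Input/output image addresses and strides, from job->args. */
        V3D_WRITE(V3D_TFU_IIA, job->args.iia);
        V3D_WRITE(V3D_TFU_IIS, job->args.iis);
        V3D_WRITE(V3D_TFU_ICA, job->args.ica);
        V3D_WRITE(V3D_TFU_IUA, job->args.iua);
        V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
        V3D_WRITE(V3D_TFU_IOS, job->args.ios);

        /* Conversion coefficients: COEF1..3 only matter when COEF0
         * has the USECOEF bit set.
         */
        V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
        if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
                V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
                V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
                V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
        }

        /* ICFG kicks off the job; IOC requests an IRQ on completion. */
        V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);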
218 struct v3d_csd_job *job = to_csd_job(sched_job); in v3d_csd_job_run() local
219 struct v3d_dev *v3d = job->base.v3d; in v3d_csd_job_run()
224 if (unlikely(job->base.base.s_fence->finished.error)) in v3d_csd_job_run()
227 v3d->csd_job = job; in v3d_csd_job_run()
235 if (job->base.irq_fence) in v3d_csd_job_run()
236 dma_fence_put(job->base.irq_fence); in v3d_csd_job_run()
237 job->base.irq_fence = dma_fence_get(fence); in v3d_csd_job_run()
241 v3d_switch_perfmon(v3d, &job->base); in v3d_csd_job_run()
244 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); in v3d_csd_job_run()
245 /* CFG0 write kicks off the job. */ in v3d_csd_job_run()
246 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]); in v3d_csd_job_run()
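The CSD (compute shader dispatch) run callback queues a dispatch by writing seven config words into the QUEUED_CFG register block. CFG1..CFG6 go first; CFG0 is written last and separately because, per the matched comment, the CFG0 write is what kicks off the job. A hedged sketch of that tail; the loop bounds are an assumption, since only the loop body appears in the matches:

        int i;

        /* Queue CFG1..CFG6 first (bounds assumed). */
        for (i = 1; i <= 6; i++)
                V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i,
                               job->args.cfg[i]);

        /* CFG0 last: this write kicks off the dispatch. */
        V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);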
254 struct v3d_job *job = to_v3d_job(sched_job); in v3d_cache_clean_job_run() local
255 struct v3d_dev *v3d = job->v3d; in v3d_cache_clean_job_run()
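The cache-clean queue programs no hardware job at all; its run callback flushes the GPU caches synchronously and returns no fence. A minimal reconstruction; only the first two lines appear in the matches, so v3d_clean_caches() and the NULL return are assumptions:

        static struct dma_fence *
        v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
        {
                struct v3d_job *job = to_v3d_job(sched_job);
                struct v3d_dev *v3d = job->v3d;

                /* Synchronous flush; no HW queue, so no fence (assumed). */
                v3d_clean_caches(v3d);

                return NULL;
        }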
301 struct v3d_job *job = to_v3d_job(sched_job); in v3d_cl_job_timedout() local
302 struct v3d_dev *v3d = job->v3d; in v3d_cl_job_timedout()
318 struct v3d_bin_job *job = to_bin_job(sched_job); in v3d_bin_job_timedout() local
321 &job->timedout_ctca, &job->timedout_ctra); in v3d_bin_job_timedout()
327 struct v3d_render_job *job = to_render_job(sched_job); in v3d_render_job_timedout() local
330 &job->timedout_ctca, &job->timedout_ctra); in v3d_render_job_timedout()
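The control-list timeout handlers share a progress-check idiom: before resetting the GPU, compare the control-list executor's current and return addresses (CTCA/CTRA) against the values cached at the previous timeout; if they moved, the job is still making progress and the reset is deferred. v3d_bin_job_timedout() and v3d_render_job_timedout() above are thin wrappers passing their queue's cached timedout_ctca/timedout_ctra into a shared helper. A hedged sketch of that helper; the register macros, the parameter list, and the scheduler-status return (older kernels returned void here) are assumptions:

        static enum drm_gpu_sched_stat
        v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
                            u32 *timedout_ctca, u32 *timedout_ctra)
        {
                struct v3d_job *job = to_v3d_job(sched_job);
                struct v3d_dev *v3d = job->v3d;
                u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); /* assumed macro */
                u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); /* assumed macro */

                /* Progress since the last timeout? Cache the new
                 * addresses and skip the reset.
                 */
                if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
                        *timedout_ctca = ctca;
                        *timedout_ctra = ctra;
                        return DRM_GPU_SCHED_STAT_NOMINAL;
                }

                return v3d_gpu_reset_for_timeout(v3d, sched_job);
        }

By contrast, v3d_generic_job_timedout() (matched below) resets unconditionally, since queues such as the TFU expose no progress counter to poll.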
336 struct v3d_job *job = to_v3d_job(sched_job); in v3d_generic_job_timedout() local
338 return v3d_gpu_reset_for_timeout(job->v3d, sched_job); in v3d_generic_job_timedout()
344 struct v3d_csd_job *job = to_csd_job(sched_job); in v3d_csd_job_timedout() local
345 struct v3d_dev *v3d = job->base.v3d; in v3d_csd_job_timedout()
351 if (job->timedout_batches != batches) { in v3d_csd_job_timedout()
352 job->timedout_batches = batches; in v3d_csd_job_timedout()
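v3d_csd_job_timedout() applies the same deferral test using the compute dispatch's batch counter instead of control-list addresses: if the number of completed batches changed since the previous timeout, the cached value is updated and the reset is skipped. A hedged reconstruction; the CFG4 read, the return type, and the fallthrough to v3d_gpu_reset_for_timeout() are assumptions beyond the matched lines:

        static enum drm_gpu_sched_stat
        v3d_csd_job_timedout(struct drm_sched_job *sched_job)
        {
                struct v3d_csd_job *job = to_csd_job(sched_job);
                struct v3d_dev *v3d = job->base.v3d;
                u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); /* assumed */

                /* Batch counter advanced: still making progress, so
                 * update the cached count and let the timeout rearm.
                 */
                if (job->timedout_batches != batches) {
                        job->timedout_batches = batches;
                        return DRM_GPU_SCHED_STAT_NOMINAL;
                }

                return v3d_gpu_reset_for_timeout(v3d, sched_job);
        }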