// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <uapi/drm/v3d_drm.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type. If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2C cache. This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);
}

/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion. So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;
	int core = 0;

	trace_v3d_cache_clean_begin(dev);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_TMUWCF), 100)) {
		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
	}

	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T clean\n");
	}

	mutex_unlock(&v3d->cache_clean_lock);

	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in. That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list). They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_gem_fence_array_add_implicit(&job->deps,
						       job->bo[i], true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	job->bo = kvmalloc_array(job->bo_count,
				 sizeof(struct drm_gem_cma_object *),
				 GFP_KERNEL | __GFP_ZERO);
	if (!job->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)bo_handles,
			   job->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

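	/* Resolve the handles under table_lock so a concurrent GEM handle
	 * close can't free an object between the idr lookup and our taking
	 * a reference on it.
	 */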
	spin_lock(&file_priv->table_lock);
	for (i = 0; i < job->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->bo[i] = bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	unsigned long index;
	struct dma_fence *fence;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		if (job->bo[i])
			drm_gem_object_put(job->bo[i]);
	}
	kvfree(job->bo);

	xa_for_each(&job->deps, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->deps);

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	pm_runtime_mark_last_busy(job->v3d->drm.dev);
	pm_runtime_put_autosuspend(job->v3d->drm.dev);

	kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

void v3d_job_put(struct v3d_job *job)
{
	kref_put(&job->refcount, job->free);
}

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}
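
/* Illustrative userspace sketch (not part of this file) of driving the wait
 * ioctl above through libdrm; "fd" and "bo_handle" are assumed to come from
 * opening the v3d node and a prior DRM_IOCTL_V3D_CREATE_BO:
 *
 *	struct drm_v3d_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = 1000000000ull,	// 1s budget
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 *
 * libdrm's drmIoctl() transparently restarts the call on EINTR/EAGAIN, which
 * is why the handler writes the remaining budget back into timeout_ns: a
 * restarted wait resumes with what is left rather than the full timeout.
 */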

static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync)
{
	struct dma_fence *in_fence = NULL;
	int ret;

	job->v3d = v3d;
	job->free = free;

	ret = pm_runtime_get_sync(v3d->drm.dev);
	if (ret < 0)
		return ret;

	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

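	/* An in_sync handle of 0 means "no input fence": drm_syncobj_find_fence()
	 * then returns -ENOENT, which is deliberately not treated as an error
	 * below (only -EINVAL is), and drm_gem_fence_array_add() is a no-op
	 * for a NULL fence.
	 */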
	ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_gem_fence_array_add(&job->deps, in_fence);
	if (ret)
		goto fail;

	kref_init(&job->refcount);

	return 0;
fail:
	xa_destroy(&job->deps);
	pm_runtime_put_autosuspend(v3d->drm.dev);
	return ret;
}

static int
v3d_push_job(struct v3d_file_priv *v3d_priv,
	     struct v3d_job *job, enum v3d_queue queue)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 v3d_priv);
	if (ret)
		return ret;

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);

	return 0;
}

static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_excl_fence(job->bo[i]->resv,
					job->done_fence);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	sync_out = drm_syncobj_find(file_priv, out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, done_fence);
		drm_syncobj_put(sync_out);
	}
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->flags != 0 &&
	    args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	render = kcalloc(1, sizeof(*render), GFP_KERNEL);
	if (!render)
		return -ENOMEM;

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl);
	if (ret) {
		kfree(render);
		return ret;
	}

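	/* A bin job is only created when userspace actually supplied a
	 * binner command list; a submit with bcl_start == bcl_end runs the
	 * render job (and optional cache-clean job) on its own.
	 */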
	if (args->bcl_start != args->bcl_end) {
		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
		if (!bin) {
			v3d_job_put(&render->base);
			return -ENOMEM;
		}

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl);
		if (ret) {
			v3d_job_put(&render->base);
			kfree(bin);
			return ret;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
		if (!clean_job) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
		if (ret) {
			kfree(clean_job);
			clean_job = NULL;
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
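	/* Push the jobs in dependency order: the render job waits on the
	 * bin job's done fence, and the optional cache-clean job waits on
	 * the render job's done fence.
	 */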
	if (bin) {
		ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
		if (ret)
			goto fail_unreserve;

		ret = drm_gem_fence_array_add(&render->base.deps,
					      dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
	if (ret)
		goto fail_unreserve;

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
		if (ret)
			goto fail_unreserve;
		ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
		if (ret)
			goto fail_unreserve;
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 last_job->done_fence);

	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return ret;
}

/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_tfu_job *job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync);
	if (ret) {
		kfree(job);
		return ret;
	}

	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		v3d_job_put(&job->base);
		return -ENOMEM;
	}

	job->args = *args;

	spin_lock(&file_priv->table_lock);
	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = idr_find(&file_priv->object_idr,
			      args->bo_handles[job->base.bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  job->base.bo_count,
				  args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->base.bo[job->base.bo_count] = bo;
	}
	spin_unlock(&file_priv->table_lock);

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
	if (ret)
		goto fail_unreserve;
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
				    &acquire_ctx);
fail:
	v3d_job_put(&job->base);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_csd_job *job;
	struct v3d_job *clean_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (!v3d_has_csd(v3d)) {
		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync);
	if (ret) {
		kfree(job);
		return ret;
	}

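	/* A CSD submit always chains a cache-clean job behind the compute
	 * job, so dirty TMU/L2T lines are written back to memory before
	 * completion is signaled (see v3d_clean_caches()).
	 */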
	clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
	if (!clean_job) {
		/* Dropping the last reference frees the job; a second
		 * explicit kfree() here would be a double free.
		 */
		v3d_job_put(&job->base);
		return -ENOMEM;
	}

	ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
	if (ret) {
		v3d_job_put(&job->base);
		kfree(clean_job);
		return ret;
	}

	job->args = *args;

	ret = v3d_lookup_bos(dev, file_priv, clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
	if (ret)
		goto fail_unreserve;

	ret = drm_gem_fence_array_add(&clean_job->deps,
				      dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
	if (ret)
		goto fail_unreserve;
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return ret;
}

int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);
	mutex_init(&v3d->cache_clean_lock);

	/* Note: We don't allocate address 0. Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
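	/* pt_size / sizeof(u32) is the number of 4KB pages the MMU can map
	 * (1M entries in the 4MB table, i.e. a 4GB virtual range), so the
	 * allocator below hands out page numbers within that range.
	 */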
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);
}