// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
 * error msgs for debugging, but we don't spam dmesg by default
 */
#define SUBMIT_ERROR(submit, fmt, ...) \
	DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)

/*
 * Cmdstream submission:
 */

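/*
 * Allocate and initialize a submit object.  The bo and cmd tables are
 * allocated together with the submit itself as one contiguous chunk,
 * sized for @nr_bos and @nr_cmds.  The hw_fence is pre-allocated and the
 * scheduler job is initialized here, so error paths of the ioctl can
 * simply drop the submit reference.
 */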
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	submit->hw_fence = msm_fence_alloc();
	if (IS_ERR(submit->hw_fence)) {
		ret = PTR_ERR(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
	if (ret) {
		kfree(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->pid = get_pid(task_pid(current));
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	/* Get a unique identifier for the submission for logging purposes */
	submit->ident = atomic_inc_return(&ident) - 1;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}

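/*
 * Final unref of the submit: tear down the scheduler job, release the
 * fence id, drop the user and hw fences, the pid and submitqueue
 * references, and the per-cmd reloc tables, then free the submit itself.
 */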
void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	/*
	 * In error paths, we could unref the submit without calling
	 * drm_sched_entity_push_job(), so msm_job_free() will never
	 * get called.  Since drm_sched_job_cleanup() will NULL out
	 * s_fence, we can use that to detect this case.
	 */
	if (submit->base.s_fence)
		drm_sched_job_cleanup(&submit->base);

	if (submit->fence_id) {
		spin_lock(&submit->queue->idr_lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		spin_unlock(&submit->queue->idr_lock);
	}

	dma_fence_put(submit->user_fence);

	/*
	 * If the submit is freed before msm_job_run(), then hw_fence is
	 * just some pre-allocated memory, not a reference counted fence.
	 * Once the job runs and the hw_fence is initialized, it will
	 * have a refcount of at least one, since the submit holds a ref
	 * to the hw_fence.
	 */
	if (kref_read(&submit->hw_fence->refcount) == 0) {
		kfree(submit->hw_fence);
	} else {
		dma_fence_put(submit->hw_fence);
	}

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}

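/*
 * Copy the bo table from userspace, validate the per-bo flags, and then
 * resolve each GEM handle to an object reference in a single pass under
 * table_lock.  On error, submit->nr_bos is trimmed so that cleanup only
 * touches entries that actually hold references.
 */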
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

		/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
		    !(submit_bo.flags & MANDATORY_FLAGS)) {
			SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = obj;
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

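/*
 * Copy the cmd table from userspace and validate each entry (cmd type and
 * dword alignment of the size), then copy in the per-cmd reloc tables.
 * Sizes and offsets are stored in dwords from here on.
 */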
static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
			return -EINVAL;
		}

		if (submit_cmd.size % 4) {
			SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
				     submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}

/* This is where we make sure all the bo's are reserved (locked); the actual
 * pinning happens afterwards in submit_pin_objects():
 */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int ret;

	drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);

	drm_exec_until_all_locked (&submit->exec) {
		for (unsigned i = 0; i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
			drm_exec_retry_on_contention(&submit->exec);
			if (ret)
				goto error;
		}
	}

	return 0;

error:
	return ret;
}

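/*
 * Register each bo's dma_resv fences as dependencies of the scheduler job,
 * implementing implicit sync.  Buffers marked MSM_SUBMIT_BO_NO_IMPLICIT
 * opt out per-bo; the caller skips this entirely when the submit sets
 * MSM_SUBMIT_NO_IMPLICIT.
 */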
static int submit_fence_sync(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* Even if the submit as a whole did not disable implicit sync,
		 * userspace can ask for it to be disabled on specific buffers.
		 * This is useful for internal usermode driver managed buffers,
		 * suballocation, etc.
		 */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}

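/*
 * With all objects locked, look up (or create) the vma for each bo in the
 * submit's address space and pin it, recording the iova the cmdstream will
 * use.  A second pass under the LRU lock then marks each object as pinned
 * so it is kept off the evictable LRUs.
 */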
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = submit->dev->dev_private;
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct msm_gem_vma *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].iova = vma->iova;
	}

	/*
	 * A second loop while holding the LRU lock (a) avoids acquiring/dropping
	 * the LRU lock for each individual bo, and (b) avoids holding the LRU
	 * lock while calling msm_gem_pin_vma_locked(), which could trigger
	 * get_pages() and hence reclaim; holding the LRU lock there could
	 * deadlock against the shrinker.
	 */
	mutex_lock(&priv->lru.lock);
	for (i = 0; i < submit->nr_bos; i++) {
		msm_gem_pin_obj_locked(submit->bos[i].obj);
	}
	mutex_unlock(&priv->lru.lock);

	submit->bos_pinned = true;

	return ret;
}

static void submit_unpin_objects(struct msm_gem_submit *submit)
{
	if (!submit->bos_pinned)
		return;

	for (int i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_locked(obj);
	}

	submit->bos_pinned = false;
}

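/*
 * Attach the submit's user_fence to each bo's reservation object, as a
 * write fence for buffers the GPU may write and as a read fence otherwise,
 * so that later implicit-sync users wait on this submission appropriately.
 */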
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct drm_gem_object **obj, uint64_t *iova)
{
	if (idx >= submit->nr_bos) {
		SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
			     idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (offset % 4) {
		SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(obj);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;

		if (submit_reloc.submit_offset % 4) {
			SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
				     submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->size / 4)) ||
		    (off < last_offset)) {
			SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
		if (ret)
			goto out;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(obj);

	return ret;
}

/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	if (error) {
		submit_unpin_objects(submit);
		/* job wasn't enqueued to scheduler, so early retirement: */
		msm_submit_retire(submit);
	}

	if (submit->exec.objects)
		drm_exec_fini(&submit->exec);
}

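/*
 * Drop the object references taken in submit_lookup_objects().  Called
 * either when the submit is retired or, for error paths, from
 * submit_cleanup().
 */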
void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		drm_gem_object_put(obj);
	}
}

struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;
	struct dma_fence_chain *chain;
};

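/*
 * Parse the MSM_SUBMIT_SYNCOBJ_IN array: add each input syncobj (or
 * timeline point) as a scheduler dependency, and collect the syncobjs
 * flagged with MSM_SUBMIT_SYNCOBJ_RESET so that their fences can be
 * cleared once the job has been queued.
 */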
static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
							   syncobj_desc.handle, syncobj_desc.point);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}
	return syncobjs;
}

static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
	uint32_t i;

	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}

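/*
 * Parse the MSM_SUBMIT_SYNCOBJ_OUT array: look up each output syncobj and,
 * for timeline points, pre-allocate the dma_fence_chain node, so that
 * msm_process_post_deps() can signal them once the job's fence exists.
 */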
static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
			    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
			                            DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}

			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}

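/*
 * After the job has been queued, signal the output syncobjs with the
 * submit's fence: timeline syncobjs get a new chain point, binary ones
 * simply have their fence replaced.
 */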
static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
	uint32_t i;

	for (i = 0; post_deps && i < count; ++i) {
		if (post_deps[i].chain) {
			drm_syncobj_add_point(post_deps[i].syncobj,
			                      post_deps[i].chain,
			                      fence, post_deps[i].point);
			post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(post_deps[i].syncobj,
			                          fence);
		}
	}
}

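/*
 * Main entry point for the SUBMIT ioctl: validate the args, create the
 * submit, gather in-fence and syncobj dependencies, look up and lock the
 * bos, pin them and patch any relocs, then assign a fence id, attach the
 * out-fence to the bos and push the job to the scheduler.  Error paths
 * unwind via submit_cleanup() and the final submit unref.
 */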
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_submit_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	struct sync_file *sync_file = NULL;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
		DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
		return -EPERM;
	}

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	ring = gpu->rb[queue->ring_nr];

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_post_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		goto out_post_unlock;
	}

	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
		args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_parse_deps(submit, file,
		                                   args->in_syncobjs,
		                                   args->nr_in_syncobjs,
		                                   args->syncobj_stride);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_parse_post_deps(dev, file,
		                                args->out_syncobjs,
		                                args->nr_out_syncobjs,
		                                args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
		ret = submit_fence_sync(submit);
		if (ret)
			goto out;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_gem_object *obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
		    (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
			SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (likely(!submit->cmd[i].nr_relocs))
			continue;

		if (!gpu->allow_relocs) {
			SUBMIT_ERROR(submit, "relocs not allowed\n");
			ret = -EINVAL;
			goto out;
		}

		ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
				   submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	idr_preload(GFP_KERNEL);

	spin_lock(&queue->idr_lock);

	/*
	 * If using a userspace provided seqno fence, validate that the id
	 * is available before arming the sched job.  Since access to
	 * fence_idr is serialized on the queue lock, the slot should still
	 * be available after the job is armed.
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
	    (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
		spin_unlock(&queue->idr_lock);
		idr_preload_end();
		ret = -EINVAL;
		goto out;
	}

	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_NOWAIT);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32 failed, it is a kernel bug
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_NOWAIT);
	}

	spin_unlock(&queue->idr_lock);
	idr_preload_end();

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}

	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->user_fence);
		if (!sync_file)
			ret = -ENOMEM;
	}

	if (ret)
		goto out;

	submit_attach_object_fences(submit);

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
	                      submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
out_unlock:
	mutex_unlock(&queue->lock);
out_post_unlock:
	if (ret) {
		if (out_fence_fd >= 0)
			put_unused_fd(out_fence_fd);
		if (sync_file)
			fput(sync_file->file);
	} else if (sync_file) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

	if (!IS_ERR_OR_NULL(submit)) {
		msm_gem_submit_put(submit);
	} else {
		/*
		 * If the submit hasn't yet taken ownership of the queue
		 * then we need to drop the reference ourself:
		 */
		msm_submitqueue_put(queue);
	}
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}