/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

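/*
 * GEM object destructor: tears down any PRIME attachment, releases the
 * GEM object, and drops the underlying TTM buffer object reference.
 */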
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}

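/*
 * Called when a client opens a handle to the object: on chips with a
 * per-client VM, look up (or create) a VMA for this buffer in the
 * client's address space and take a reference on it.
 */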
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

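/* Fence callback: unmap and free a VMA once the GPU is done with it. */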
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

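/*
 * Drop a VMA.  If the buffer is still mapped and fenced, defer the unmap
 * to a fence callback so the GPU finishes first; otherwise unmap and free
 * it immediately.
 */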
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
}

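/*
 * Called when a client closes its handle: drop the VMA reference taken in
 * nouveau_gem_object_open() and unmap it when the count reaches zero.
 */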
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

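/*
 * Allocate a new buffer object with an embedded GEM object and place it
 * according to the requested domains.
 */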
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

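/* Fill in the userspace info struct (domain, offset, size, map handle, tiling). */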
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

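/* DRM_NOUVEAU_GEM_NEW: allocate a buffer and return a handle plus its info. */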
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

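/*
 * Choose TTM placement flags for a buffer from the domains userspace
 * requested, preferring wherever the buffer currently resides.
 */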
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

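/*
 * Unwind a validation pass: fence each buffer (when a fence was emitted),
 * drop any kmaps taken for relocations, then unreserve and unreference
 * everything on the list.  validate_fini() also releases the acquire ticket.
 */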
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

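/*
 * Look up and reserve every buffer on the pushbuf list under a single
 * ww_acquire ticket, backing off and retrying via the slowpath on
 * -EDEADLK.
 */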
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(error, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}

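/*
 * Validate each reserved buffer into its preferred placement, sync it with
 * the channel, and refresh userspace's presumed offsets where they have
 * become stale.  Returns the number of buffers whose presumed data changed.
 */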
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(error, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

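/* Reserve and validate the full buffer list for a pushbuf submission. */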
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

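/*
 * Copy a userspace array into a kernel buffer, falling back to vmalloc
 * for large allocations.  Free the result with u_free().
 */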
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

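/*
 * Apply userspace-requested relocations by patching presumed GPU
 * addresses into buffer contents through a CPU mapping.
 */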
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(error, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(error, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

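/*
 * DRM_NOUVEAU_GEM_PUSHBUF: validate the buffer list, apply relocations if
 * needed, submit the push buffers to the channel and fence the result.
 */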
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

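/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with NOWAIT) for pending GPU
 * work on the buffer before CPU access.
 */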
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait)
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

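/* DRM_NOUVEAU_GEM_INFO: look up a handle and report the buffer's details. */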
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
899 
900