/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

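/* GEM object init hook; the object is allocated and wired up in
 * nouveau_gem_new(), so there is nothing to do here.
 */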
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

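/* Final teardown of a GEM object: drop any leftover pin, detach a prime
 * import (if any), release the TTM buffer and free the object.
 */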
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

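/* Called when a client opens a handle to this object: on chips with a
 * per-client VM, find or create the VMA mapping the buffer into that
 * client's address space and take a reference on it.
 */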
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

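/* Counterpart to nouveau_gem_object_open(): drop the client's VMA
 * reference and tear the mapping down when the last reference goes away.
 */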
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

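/* Allocate a new buffer object in the requested placement(s) and wrap it
 * in a GEM object.  On failure the buffer object reference is dropped.
 */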
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

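/* Fill in a drm_nouveau_gem_info reply for the given object: domain,
 * GPU offset (the per-client VMA offset on chips with a VM), size,
 * mmap handle and tiling state.
 */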
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

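/* GEM_NEW ioctl: check the requested tile flags, allocate the buffer,
 * create a handle for the caller and return its info.  The allocation
 * reference is dropped once the handle owns the object.
 */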
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

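/* Work out TTM placement flags for a pushbuf buffer from the domains
 * userspace asked for, preferring the memory type the buffer already
 * resides in to avoid needless migration.
 */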
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

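/* Undo validate_init() for one list: fence each buffer, drop any
 * temporary kmap, unreserve it and release the lookup reference.
 */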
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

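/* Look up and reserve every buffer on the pushbuf's validation list,
 * sorting them onto the per-domain lists in @op.  If a reservation
 * backs off with -EAGAIN, the list is retried with a new validation
 * sequence.
 */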
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN)) {
				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      sequence);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	return 0;
}

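/* Make the channel wait for any fence still attached to the buffer
 * before new commands use it.
 */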
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

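/* Validate each reserved buffer into an allowed placement and sync it
 * with the channel.  On pre-NV50 chips, update the userspace-presumed
 * offset/domain when they turn out to be stale.  Returns the number of
 * buffers whose presumed state changed (relocs to apply), or a negative
 * error code.
 */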
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

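/* Reserve and validate all buffers referenced by a pushbuf submission,
 * accumulating the number of relocations that must be applied.
 */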
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

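/* Copy an array of nmemb elements of the given size in from userspace
 * into a freshly allocated kernel buffer.
 */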
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

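/* Patch relocations into buffers whose presumed offsets were stale:
 * each reloc rewrites one 32-bit word after waiting for the target
 * buffer to become idle.
 */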
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		/* bo[] has nr_buffers entries, so indices must be strictly
		 * less than nr_buffers.
		 */
		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

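/* GEM_PUSHBUF ioctl: copy in the push/buffer/reloc arrays, validate the
 * buffer list, apply relocations if needed, then submit the pushes using
 * the method appropriate for the chipset (IB ring, call, or jump) and
 * fence the submission.
 */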
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

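/* Translate NOUVEAU_GEM_DOMAIN_* bits into the equivalent TTM placement
 * flags.
 */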
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

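/* CPU_PREP ioctl: wait for the buffer to go idle before CPU access;
 * with NOUVEAU_GEM_CPU_PREP_NOWAIT set it fails with -EBUSY instead of
 * blocking.
 */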
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

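/* CPU_FINI ioctl: there is no CPU-access state to tear down, so this is
 * currently a no-op.
 */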
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

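/* GEM_INFO ioctl: look up the handle and report the buffer's current
 * placement, offset, size and tiling via nouveau_gem_info().
 */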
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}