/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};

struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);

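/* Check whether @bo can be accounted against this krec's VRAM/GART
 * budget, narrowing *domains if that is what it takes to make it fit.
 * Returns false when no placement works and a flush is required.
 */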
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space; if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer.
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort,
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one.
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}

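/* Add or update the kernel buffer reference for @bo on this pushbuf.
 * Returns NULL when the reference cannot be taken without flushing
 * first: on a memory-type conflict, a full buffer list, or when the
 * buffer no longer fits within the memory budget.
 */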
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if the buffer is referenced on another pushbuf that is owned by
	 * the same client, we need to flush the other pushbuf first to
	 * ensure the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains  & NOUVEAU_GEM_DOMAIN_GART) &&
		    (            domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;
			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains  |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}

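/* Append a relocation entry for @bo at the current position in the
 * push buffer and return the value to be written there, computed
 * from the buffer's presumed offset and placement.
 */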
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel  = &krec->reloc[krec->nr_reloc++];

	assert(pkref);
	assert(bkref);
	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}

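/* Dump one krec (buffer refs, relocs, push entries and, where
 * mapped, the command words themselves) to the error log.
 */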
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(uintptr_t)kref->user_priv;
		err("ch%d: buf %08x %08x %08x %08x %08x %p 0x%"PRIx64" 0x%"PRIx64"\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains, bo->map, bo->offset, bo->size);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + ((kpsh->length & 0x7fffff) / 4);

		err("ch%d: psh %s%08x %010llx %010llx\n", chid,
		    bo->map ? "" : "(unmapped) ", kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		if (!bo->map)
			continue;
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}

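/* Hand every queued krec to the kernel via DRM_NOUVEAU_GEM_PUSHBUF,
 * then refresh each buffer's presumed placement and offset from the
 * kernel's reply.
 */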
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		if (dbg_on(1))
			req.vram_available |= NOUVEAU_GEM_PUSHBUF_SYNC;
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

#ifndef SIMULATE
		ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				nouveau_device(dev)->gart_limit_percent) / 100;
#else
		if (dbg_on(31))
			ret = -EINVAL;
#endif

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}

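/* Flush recorded commands.  An immediate pushbuf (one bound to a
 * channel) submits to the kernel and drops its buffer references;
 * otherwise a fresh krec is chained so recording can continue until
 * a later submit.  Either way the accounting is reset and attached
 * bufctxs have their buffers moved back to the pending list.
 */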
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

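/* Undo buffer references taken after the @sref/@srel high-water
 * marks; used to roll back when a batch of references cannot all be
 * added.
 */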
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

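/* Take references on an array of buffers, rolling everything back on
 * failure and, when @retry is set, flushing once and trying again
 * from a clean slate.
 */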
static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}

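/* Reserve space for the attached bufctx's relocations, then
 * reference every pending buffer and emit its relocs.  On failure
 * the additions are rolled back and, when @retry is set, the pushbuf
 * is flushed and validation re-attempted once.
 */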
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
					   bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}

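/* Create a pushbuf on @chan with @nr backing buffers of @size bytes
 * each.  When @immediate is set the pushbuf submits on every flush;
 * otherwise submission is deferred until nouveau_pushbuf_kick().
 *
 * An illustrative sketch of typical use; the buffer count/size and
 * the surrounding client/channel setup are assumptions, not
 * something this file prescribes:
 *
 *	struct nouveau_pushbuf *push;
 *	int ret = nouveau_pushbuf_new(client, chan, 4, 32 * 1024,
 *				      true, &push);
 *	if (ret == 0) {
 *		ret = nouveau_pushbuf_space(push, 8, 0, 0);
 *		// ... write command words via *push->cur++ ...
 *		nouveau_pushbuf_kick(push, chan);
 *		nouveau_pushbuf_del(&push);
 *	}
 */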
drm_public int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_drm *drm = nouveau_drm(&client->device->object);
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type   = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type   = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}

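/* Destroy a pushbuf: drop every buffer reference still held by
 * queued krecs, release the backing buffers and clear *ppush.
 */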
drm_public void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}

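/* Install @ctx as the pushbuf's active buffer context, returning
 * whichever context was active before.
 */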
drm_public struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}

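/* Guarantee room for @dwords of commands, @relocs relocations and
 * @pushes push entries, switching to the next backing buffer and/or
 * flushing when the current one cannot hold them.
 */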
drm_public int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && ( push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}

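/* Queue a push entry covering @length bytes of @bo at @offset.  A
 * NULL @bo flushes the commands recorded so far in the current
 * backing buffer into a push entry of their own.
 */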
drm_public void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset   = offset;
		kpsh->length   = length;
	}
}

drm_public int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

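/* Emit a relocation for @bo and write the resulting value at the
 * current position in the push buffer.
 */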
drm_public void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}

drm_public int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

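/* Report how @bo is currently referenced on this pushbuf, as a mask
 * of NOUVEAU_BO_RD/NOUVEAU_BO_WR, or 0 when it is not referenced.
 */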
drm_public uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

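/* Explicitly submit the pushbuf on @chan.  Deferred pushbufs submit
 * their queued krecs directly; immediate ones flush (which submits)
 * and then re-validate so recording can continue.
 */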
drm_public int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);
	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}

drm_public bool
nouveau_check_dead_channel(struct nouveau_drm *drm, struct nouveau_object *chan)
{
	struct drm_nouveau_gem_pushbuf req = {};
	struct nouveau_fifo *fifo = chan->data;
	int ret;

	req.channel = fifo->channel;
	req.nr_push = 0;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	/* nouveau returns -ENODEV once the channel has been killed */
	return ret == -ENODEV;
}