/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

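/* A single kernel submission record: the buffer, relocation and push tables
 * handed to DRM_NOUVEAU_GEM_PUSHBUF in one ioctl, along with running totals
 * of the VRAM/GART space the referenced buffers require.  Records are
 * chained via ->next when commands accumulate on a deferred (non-immediate)
 * pushbuf across multiple flushes.
 */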
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};

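/* Private pushbuf state behind the public struct nouveau_pushbuf.  Tracks
 * the chain of kernel submission records, the backing buffer objects that
 * command words are written into, and the bufctx objects whose buffer
 * references are pending on this pushbuf.
 */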
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);

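/* Check whether accounting for 'bo' would stay within the device's VRAM
 * and GART limits, updating the krec's usage totals on success.  May narrow
 * *domains, or migrate already-referenced VRAM|GART buffers to VRAM, to
 * make room.  Returns false if a flush is required first.
 */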
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space.  If it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer.
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one.
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}

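/* Add (or update) a kernel buffer reference for 'bo' on the current
 * submission record.  Flushes any other pushbuf owned by the same client
 * that already references the buffer, to preserve command ordering.
 * Returns NULL when a domain conflict or lack of space means the caller
 * must flush and retry.
 */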
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if the buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;
			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains  |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}

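/* Record a relocation against 'bo' at the current write position and
 * return the value to write there now, computed from the buffer's presumed
 * offset.  The kernel patches the word later if the presumed placement
 * turns out to be wrong.
 */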
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel  = &krec->reloc[krec->nr_reloc++];

	assert(pkref);
	assert(bkref);
	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}

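/* Debug helper: dump a submission record's buffer, relocation and push
 * tables, followed by the raw command words of each push, to the error log.
 */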
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + (kpsh->length / 4);

		err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}

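/* Submit every accumulated submission record to the kernel via the
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl, then update each buffer's flags and
 * offset from the placement information the kernel returns.
 */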
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

#ifndef SIMULATE
		ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				nouveau_device(dev)->gart_limit_percent) / 100;
#else
		if (dbg_on(31))
			ret = -EINVAL;
#endif

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}

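/* Flush pending commands: submit immediately if the pushbuf is bound to a
 * channel, otherwise chain a fresh submission record so commands keep
 * accumulating.  Either way, drop all buffer references and reset the
 * accounting on the current record.
 */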
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		/* chain a fresh record; calloc ensures its next pointer and
		 * usage counters start out zeroed
		 */
		krec->next = calloc(1, sizeof(*krec));
		if (!krec->next)
			return -ENOMEM;
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

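/* Roll back buffer references and relocations added since the snapshot
 * taken at indices 'sref'/'srel', dropping the reference each affected
 * buffer gained in pushbuf_kref().
 */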
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

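/* Reference an array of buffers on the pushbuf.  On failure, roll back
 * and, if 'retry' is set, flush and try once more from a clean record.
 */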
static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}

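/* Ensure there is pushbuf space for the attached bufctx's relocations,
 * then reference every pending buffer and emit its relocations.  On
 * failure, roll back and optionally flush and retry once.
 */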
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
					   bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}

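/* Create a new pushbuf on 'chan' with 'nr' backing buffers of 'size' bytes
 * each.  If 'immediate' is set, flushes submit directly to the channel;
 * otherwise commands accumulate until nouveau_pushbuf_kick().
 *
 * A minimal usage sketch (illustrative only; error handling is omitted and
 * the emitted method/data words below are placeholders, not real methods):
 *
 *	struct nouveau_pushbuf *push;
 *	nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push);
 *	nouveau_pushbuf_space(push, 2, 0, 0);
 *	*push->cur++ = 0x20010000;	// hypothetical method header
 *	*push->cur++ = 0xdeadbeef;	// hypothetical data word
 *	nouveau_pushbuf_kick(push, push->channel);
 *	nouveau_pushbuf_del(&push);
 */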
int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_drm *drm = nouveau_drm(&client->device->object);
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* no-op pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type   = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type   = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}

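/* Destroy a pushbuf: drop every buffer reference on every queued
 * submission record, free the records, and release the backing buffer
 * objects.
 */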
void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}

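/* Attach a buffer context to the pushbuf, returning the previously
 * attached context (if any).
 */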
struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}

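/* Guarantee room for 'dwords' command words, 'relocs' relocations and
 * 'pushes' push entries, switching to (or allocating) a new backing
 * buffer and/or flushing as required.
 */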
int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && (push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}

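/* Queue 'length' bytes of 'bo' starting at 'offset' as a push entry on the
 * current submission record.  Called with a NULL 'bo' to flush out the
 * command words written to the internal buffer since the last call, first
 * appending the "return to main" suffix needed on early chipsets.
 */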
void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset   = offset;
		kpsh->length   = length;
	}
}

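/* Public wrapper around pushbuf_refn() that enables flush-and-retry. */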
int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

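/* Emit a relocation for 'bo' at the current write position and advance
 * the write pointer past the patched word.
 */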
void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}

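/* Public wrapper around pushbuf_validate() that enables flush-and-retry. */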
int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

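/* Report how this pushbuf currently references 'bo': a mask of
 * NOUVEAU_BO_RD/NOUVEAU_BO_WR, or zero if the buffer isn't referenced here.
 */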
uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

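/* Flush all pending commands to 'chan'.  For deferred pushbufs this is the
 * point where the accumulated submission records actually reach the kernel.
 */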
int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);
	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}