#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_surface.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

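/* Per-map bookkeeping for buffer transfers: the CPU pointer handed back to
 * the caller plus the optional staging BO (a GART sub-allocation) backing it.
 */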
struct nouveau_transfer {
   struct pipe_transfer base;

   uint8_t *map;
   struct nouveau_bo *bo;
   struct nouveau_mm_allocation *mm;
   uint32_t offset;
};

static inline struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

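/* Lazily allocate the aligned system-memory copy backing buf->data. */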
static inline bool
nouveau_buffer_malloc(struct nv04_resource *buf)
{
   if (!buf->data)
      buf->data = align_malloc(buf->base.width0, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
   return !!buf->data;
}

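/* Allocate backing storage in the requested domain: a VRAM sub-allocation
 * (falling back to GART if VRAM is unavailable), a GART sub-allocation, or,
 * for domain == 0, plain malloc'd system memory. Records the GPU address and
 * resets the valid range.
 */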
static inline bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = align(buf->base.width0, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return false;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return false;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   util_range_set_empty(&buf->valid_buffer_range);

   return true;
}

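/* Defer freeing the suballocator block until the given fence has signalled,
 * since the GPU may still be using the memory.
 */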
static inline void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

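/* Drop the buffer's GPU backing store: unref the BO (deferred to fence work
 * if its fence has not been flushed yet), release the sub-allocation, update
 * the memory statistics and reset the domain.
 */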
inline void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
      nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
      buf->bo = NULL;
   } else {
      nouveau_bo_ref(NULL, &buf->bo);
   }

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);

   buf->domain = 0;
}

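/* Throw away the current storage and fences and allocate fresh storage in
 * the given domain, keeping only the status bits that survive reallocation.
 */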
static inline bool
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   nouveau_fence_ref(NULL, &buf->fence);
   nouveau_fence_ref(NULL, &buf->fence_wr);

   buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;

   return nouveau_buffer_allocate(screen, buf, domain);
}

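/* pipe_screen::resource_destroy callback: releases GPU storage, the optional
 * system-memory copy (unless it is user memory), fences and the valid range.
 */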
static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      align_free(res->data);

   nouveau_fence_ref(NULL, &res->fence);
   nouveau_fence_ref(NULL, &res->fence_wr);

   util_range_destroy(&res->valid_buffer_range);

   FREE(res);

   NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
}

/* Set up a staging area for the transfer. This is either done in "regular"
 * system memory if the driver supports push_data (nv50+) and the data is
 * small enough (and permit_pb == true), or in GART memory.
 */
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, bool permit_pb)
{
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   if (!nv->push_data)
      permit_pb = false;

   if ((size <= nv->screen->transfer_pushbuf_threshold) && permit_pb) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;
   } else {
      tx->mm =
         nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}

/* Copies data from the resource into the transfer's temporary GART
 * buffer. Also updates buf->data if present.
 *
 * Maybe just migrate to GART right away if we actually need to do this. */
static bool
nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   const unsigned base = tx->base.box.x;
   const unsigned size = tx->base.box.width;

   NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size);

   nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + base, buf->domain, size);

   if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
      return false;

   if (buf->data)
      memcpy(buf->data + base, tx->map, size);

   return true;
}

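/* Writes the [offset, offset + size) range of the transfer back to the
 * resource. If the resource keeps a system-memory copy (buf->data), the
 * staging map is refreshed from it first (maps of such buffers return
 * buf->data directly); otherwise the cached copy is marked dirty. The upload
 * then goes through a GART copy if a staging BO exists, through push_cb for
 * dword-aligned updates, or through push_data, and both fences are updated
 * since this is a GPU write.
 */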
static void
nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
                       unsigned offset, unsigned size)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   uint8_t *data = tx->map + offset;
   const unsigned base = tx->base.box.x + offset;
   const bool can_cb = !((base | size) & 3);

   if (buf->data)
      memcpy(data, buf->data + base, size);
   else
      buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size);

   if (tx->bo)
      nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
                    tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
   else
   if (nv->push_cb && can_cb)
      nv->push_cb(nv, buf,
                  base, size / 4, (const uint32_t *)data);
   else
      nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);

   nouveau_fence_ref(nv->screen->fence.current, &buf->fence);
   nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr);
}

/* Does a CPU wait for the buffer's backing data to become reliably accessible
 * for write/read by waiting on the buffer's relevant fences.
 */
static inline bool
nouveau_buffer_sync(struct nouveau_context *nv,
                    struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_MAP_READ) {
      if (!buf->fence_wr)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence_wr));
      if (!nouveau_fence_wait(buf->fence_wr, &nv->debug))
         return false;
   } else {
      if (!buf->fence)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence));
      if (!nouveau_fence_wait(buf->fence, &nv->debug))
         return false;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return true;
}

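/* Checks whether the buffer is still in use by the GPU for the given kind of
 * access: the write fence for reads, any fence for writes.
 */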
static inline bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_MAP_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

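/* Fills in the generic pipe_transfer fields for a 1D buffer transfer; the
 * staging state (bo/map) starts out empty.
 */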
static inline void
nouveau_buffer_transfer_init(struct nouveau_transfer *tx,
                             struct pipe_resource *resource,
                             const struct pipe_box *box,
                             unsigned usage)
{
   tx->base.resource = resource;
   tx->base.level = 0;
   tx->base.usage = usage;
   tx->base.box.x = box->x;
   tx->base.box.y = 0;
   tx->base.box.z = 0;
   tx->base.box.width = box->width;
   tx->base.box.height = 1;
   tx->base.box.depth = 1;
   tx->base.stride = 0;
   tx->base.layer_stride = 0;

   tx->bo = NULL;
   tx->map = NULL;
}

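/* Releases the transfer's staging resources: the staging BO and its
 * sub-allocation are released via fence work (the copy may still be in
 * flight), a malloc'd bounce buffer is freed immediately.
 */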
static inline void
nouveau_buffer_transfer_del(struct nouveau_context *nv,
                            struct nouveau_transfer *tx)
{
   if (tx->map) {
      if (likely(tx->bo)) {
         nouveau_fence_work(nv->screen->fence.current,
                            nouveau_fence_unref_bo, tx->bo);
         if (tx->mm)
            release_allocation(&tx->mm, nv->screen->fence.current);
      } else {
         align_free(tx->map -
                    (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
      }
   }
}

/* Creates a cache in system memory of the buffer data. */
static bool
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
   struct nouveau_transfer tx;
   bool ret;
   tx.base.resource = &buf->base;
   tx.base.box.x = 0;
   tx.base.box.width = buf->base.width0;
   tx.bo = NULL;
   tx.map = NULL;

   if (!buf->data)
      if (!nouveau_buffer_malloc(buf))
         return false;
   if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
      return true;
   nv->stats.buf_cache_count++;

   if (!nouveau_transfer_staging(nv, &tx, false))
      return false;

   ret = nouveau_transfer_read(nv, &tx);
   if (ret) {
      buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY;
      memcpy(buf->data, tx.map, buf->base.width0);
   }
   nouveau_buffer_transfer_del(nv, &tx);
   return ret;
}


#define NOUVEAU_TRANSFER_DISCARD \
   (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE)

/* Checks whether it is possible to completely discard the memory backing this
 * resource. This can be useful if we would otherwise have to wait for a read
 * operation to complete on this data.
 */
static inline bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
   if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE))
      return false;
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return false;
   if (unlikely(usage & PIPE_MAP_PERSISTENT))
      return false;
   return buf->mm && nouveau_buffer_busy(buf, PIPE_MAP_WRITE);
}

/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However,
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the caller plans to write to it
 *
 *   READ means that the caller plans to read from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource is potentially going
 *   to be overwritten, and even if it isn't, the parts that are not
 *   overwritten do not need to be preserved.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see the comments inside the function.
 */
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_MAP_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_MAP_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, since we know that even if this
    * buffer is busy due to GPU activity, because the contents were
    * uninitialized, the GPU can't care what was there, and so we can treat
    * the write as being unsynchronized.
    */
   if ((usage & PIPE_MAP_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
      usage |= PIPE_MAP_DISCARD_RANGE | PIPE_MAP_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, true);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * not the whole area being mapped is being discarded.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, false);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_MAP_WRITE)
               nouveau_transfer_staging(nv, tx, true);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_MAP_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_MAP_READ_WRITE)) {
      if (unlikely(usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE |
                            PIPE_MAP_PERSISTENT))) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE);
      } else
      if (usage & PIPE_MAP_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, true);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_MAP_READ)) {
         if (usage & PIPE_MAP_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(nv, buf, usage & PIPE_MAP_READ_WRITE);
      } else {
         /* It is expected that the returned buffer be a representation of the
          * data in question, so we must copy it over from the buffer. */
         nouveau_transfer_staging(nv, tx, true);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}


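/* PIPE_MAP_FLUSH_EXPLICIT handling: writes back only the flushed sub-range
 * (given relative to the mapped range) and marks it as valid.
 */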
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);

   util_range_add(&buf->base, &buf->valid_buffer_range,
                  tx->base.box.x + box->x,
                  tx->base.box.x + box->x + box->width);
}

/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding.
 */
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_MAP_WRITE) {
      if (!(tx->base.usage & PIPE_MAP_FLUSH_EXPLICIT)) {
         if (tx->map)
            nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

         util_range_add(&buf->base, &buf->valid_buffer_range,
                        tx->base.box.x, tx->base.box.x + tx->base.box.width);
      }

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = true;
      }
   }

   if (!tx->bo && (tx->base.usage & PIPE_MAP_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}


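/* Copies a range between two buffers. If both have GPU storage the copy is
 * done on the GPU and the fences/status flags are updated; otherwise it falls
 * back to util_resource_copy_region. The destination range is marked valid.
 */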
void
nouveau_copy_buffer(struct nouveau_context *nv,
                    struct nv04_resource *dst, unsigned dstx,
                    struct nv04_resource *src, unsigned srcx, unsigned size)
{
   assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);

   assert(!(dst->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
   assert(!(src->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   if (likely(dst->domain) && likely(src->domain)) {
      nv->copy_data(nv,
                    dst->bo, dst->offset + dstx, dst->domain,
                    src->bo, src->offset + srcx, src->domain, size);

      dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);

      src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
      nouveau_fence_ref(nv->screen->fence.current, &src->fence);
   } else {
      struct pipe_box src_box;
      src_box.x = srcx;
      src_box.y = 0;
      src_box.z = 0;
      src_box.width = size;
      src_box.height = 1;
      src_box.depth = 1;
      util_resource_copy_region(&nv->pipe,
                                &dst->base, 0, dstx, 0, 0,
                                &src->base, 0, &src_box);
   }

   util_range_add(&dst->base, &dst->valid_buffer_range, dstx, dstx + size);
}


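/* Returns a CPU pointer to the resource's data at the given offset for
 * immediate access. User-memory resources are returned directly; VRAM
 * resources are read back into the system-memory cache first; GART resources
 * are mapped, with a fence sync when they are sub-allocated from a slab.
 */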
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) ||
       unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_PTR))
      return res->data + offset;

   if (res->domain == NOUVEAU_BO_VRAM) {
      if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
         nouveau_buffer_cache(nv, res);
   }
   if (res->domain != NOUVEAU_BO_GART)
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_MAP_WRITE : PIPE_MAP_READ;
      nouveau_buffer_sync(nv, res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}

const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
};

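/* Callbacks for resources created from a user pointer: there is no
 * driver-managed storage, so destruction just frees the wrapper and transfers
 * hand back the user's memory directly.
 */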
static void
nouveau_user_ptr_destroy(struct pipe_screen *pscreen,
                         struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);
   FREE(res);
}

static void *
nouveau_user_ptr_transfer_map(struct pipe_context *pipe,
                              struct pipe_resource *resource,
                              unsigned level, unsigned usage,
                              const struct pipe_box *box,
                              struct pipe_transfer **ptransfer)
{
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;
   return nv04_resource(resource)->data;
}

static void
nouveau_user_ptr_transfer_unmap(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   FREE(tx);
}

const struct u_resource_vtbl nouveau_user_ptr_buffer_vtbl =
{
   u_default_resource_get_handle,   /* get_handle */
   nouveau_user_ptr_destroy,        /* resource_destroy */
   nouveau_user_ptr_transfer_map,   /* transfer_map */
   u_default_transfer_flush_region, /* transfer_flush_region */
   nouveau_user_ptr_transfer_unmap, /* transfer_unmap */
};

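/* Standard buffer creation: picks a placement domain from the resource flags,
 * bind flags and usage (persistent/coherent and streaming/staging buffers go
 * to GART, default/immutable/dynamic ones to VRAM where available), allocates
 * the storage and optionally keeps a system-memory copy of VRAM buffers.
 */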
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   bool ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
      buffer->domain = NOUVEAU_BO_GART;
   } else if (buffer->base.bind == 0 || (buffer->base.bind &
              (screen->vidmem_bindings & screen->sysmem_bindings))) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NV_VRAM_DOMAIN(screen);
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }

   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == false)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   util_range_init(&buffer->valid_buffer_range);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

struct pipe_resource *
nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
                                const struct pipe_resource *templ,
                                void *user_ptr)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_user_ptr_buffer_vtbl;
   /* set address and data to the same thing for higher compatibility with
    * existing code. It's correct nonetheless as the same pointer is equally
    * valid on the CPU and the GPU.
    */
   buffer->address = (uint64_t)user_ptr;
   buffer->data = user_ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_PTR;
   buffer->base.screen = pscreen;

   pipe_reference_init(&buffer->base.reference, 1);

   return &buffer->base;
}

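/* Wraps application-owned memory (e.g. client-side vertex arrays) in a
 * resource without any GPU storage; the whole range is considered valid.
 */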
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   util_range_init(&buffer->valid_buffer_range);
   util_range_add(&buffer->base, &buffer->valid_buffer_range, 0, bytes);

   return &buffer->base;
}

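/* Reads the given range of the BO back into the buffer's system-memory copy,
 * allocating that copy if necessary.
 */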
static inline bool
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!nouveau_buffer_malloc(buf))
      return false;
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
      return false;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return true;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
bool
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return false;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return false;
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return false;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return false;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      tx.map = NULL;
      if (!nouveau_transfer_staging(nv, &tx, false))
         return false;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return false;

   assert(buf->domain == new_domain);
   return true;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return false;

   ret = nouveau_bo_map(buf->bo, 0, nv->client);
   if (ret)
      return false;
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return true;
}

/* Invalidate underlying buffer storage, reset fences, reallocate to non-busy
 * buffer.
 */
void
nouveau_buffer_invalidate(struct pipe_context *pipe,
                          struct pipe_resource *resource)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   int ref = buf->base.reference.count - 1;

   assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));

   /* Shared buffers shouldn't get reallocated */
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return;

   /* If the buffer is sub-allocated and not currently being written, just
    * wipe the valid buffer range. Otherwise we have to create fresh
    * storage. (We don't keep track of fences for non-sub-allocated BO's.)
    */
   if (buf->mm && !nouveau_buffer_busy(buf, PIPE_MAP_WRITE)) {
      util_range_set_empty(&buf->valid_buffer_range);
   } else {
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }
}


/* Scratch data allocation. */

static inline int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

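/* Fence work callback: unreferences all BOs in a runout list and frees it. */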
static void
nouveau_scratch_unref_bos(void *d)
{
   struct runout *b = d;
   int i;

   for (i = 0; i < b->nr; ++i)
      nouveau_bo_ref(NULL, &b->bo[i]);

   FREE(b);
}

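/* Schedules the runout BO list for release once the current fence signals,
 * then resets the scratch state if the fence work could be queued.
 */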
void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.runout)
      return;

   if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
         nv->scratch.runout))
      return;

   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static inline bool
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   unsigned n;

   if (nv->scratch.runout)
      n = nv->scratch.runout->nr;
   else
      n = 0;
   nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 :
                                (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)),
                                 sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *));
   nv->scratch.runout->nr = n + 1;
   nv->scratch.runout->bo[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout->bo[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}

/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created.
 */
static inline bool
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return false;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return false;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

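/* Get scratch space for at least min_size bytes: advance to the next ring
 * buffer if possible, otherwise fall back to a dedicated runout BO.
 */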
static bool
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   bool ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}


/* Copy data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   return (*bo)->offset + (bgn - base);
}

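/* Reserve size bytes of scratch space without copying anything; returns a CPU
 * pointer and reports the GPU address and BO of the reservation.
 */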
void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}