/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "util/u_memory.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"

#include <inttypes.h>
#include <stdio.h>

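/* Return whether the buffer is referenced by the current graphics command stream
 * for the given usage (read/write) mask.
 */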
bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
                                unsigned usage)
{
   return sctx->ws->cs_is_buffer_referenced(&sctx->gfx_cs, buf, usage);
}

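/* Map a buffer for CPU access, synchronizing against the graphics CS as requested
 * by the usage flags.
 */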
void *si_buffer_map(struct si_context *sctx, struct si_resource *resource,
                    unsigned usage)
{
   return sctx->ws->buffer_map(sctx->ws, resource->buf, &sctx->gfx_cs, usage);
}

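/* Choose the memory domains (VRAM/GTT) and winsys BO flags for a resource based on
 * its pipe usage, bind flags, and driver-internal flags, before the BO is allocated.
 */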
void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res, uint64_t size,
                             unsigned alignment)
{
   struct si_texture *tex = (struct si_texture *)res;

   res->bo_size = size;
   res->bo_alignment_log2 = util_logbase2(alignment);
   res->flags = 0;
   res->texture_handle_allocated = false;
   res->image_handle_allocated = false;

   switch (res->b.b.usage) {
   case PIPE_USAGE_STREAM:
      res->flags |= RADEON_FLAG_GTT_WC;
      if (sscreen->info.smart_access_memory)
         res->domains = RADEON_DOMAIN_VRAM;
      else
         res->domains = RADEON_DOMAIN_GTT;
      break;
   case PIPE_USAGE_STAGING:
      /* Transfers are likely to occur more often with these
       * resources. */
      res->domains = RADEON_DOMAIN_GTT;
      break;
   case PIPE_USAGE_DYNAMIC:
   case PIPE_USAGE_DEFAULT:
   case PIPE_USAGE_IMMUTABLE:
   default:
      /* Not listing GTT here improves performance in some
       * apps. */
      res->domains = RADEON_DOMAIN_VRAM;
      res->flags |= RADEON_FLAG_GTT_WC;
      break;
   }

   if (res->b.b.target == PIPE_BUFFER && res->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
      /* Use GTT for all persistent mappings with older
       * kernels, because they didn't always flush the HDP
       * cache before CS execution.
       *
       * Write-combined CPU mappings are fine, the kernel
       * ensures all CPU writes finish before the GPU
       * executes a command stream.
       *
       * radeon doesn't have good BO move throttling, so put all
       * persistent buffers into GTT to prevent VRAM CPU page faults.
       */
      if (!sscreen->info.is_amdgpu)
         res->domains = RADEON_DOMAIN_GTT;
   }

   /* Tiled textures are unmappable. Always put them in VRAM. */
   if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
       res->b.b.flags & PIPE_RESOURCE_FLAG_UNMAPPABLE) {
      res->domains = RADEON_DOMAIN_VRAM;
      res->flags |= RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC;
   }

   /* Displayable and shareable surfaces are not suballocated. */
   if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
      res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
   else
      res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (res->b.b.bind & PIPE_BIND_PROTECTED ||
       /* Force scanout/depth/stencil buffer allocation to be encrypted */
       (sscreen->debug_flags & DBG(TMZ) &&
        res->b.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL)))
      res->flags |= RADEON_FLAG_ENCRYPTED;

   if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
      res->flags |= RADEON_FLAG_ENCRYPTED;

   if (sscreen->debug_flags & DBG(NO_WC))
      res->flags &= ~RADEON_FLAG_GTT_WC;

   if (res->b.b.flags & SI_RESOURCE_FLAG_READ_ONLY)
      res->flags |= RADEON_FLAG_READ_ONLY;

   if (res->b.b.flags & SI_RESOURCE_FLAG_32BIT)
      res->flags |= RADEON_FLAG_32BIT;

   if (res->b.b.flags & SI_RESOURCE_FLAG_DRIVER_INTERNAL)
      res->flags |= RADEON_FLAG_DRIVER_INTERNAL;

   if (res->b.b.flags & PIPE_RESOURCE_FLAG_SPARSE)
      res->flags |= RADEON_FLAG_SPARSE;

   /* For higher throughput and lower latency over PCIe assuming sequential access.
    * Only CP DMA and optimized compute benefit from this.
    * GFX8 and older don't support RADEON_FLAG_GL2_BYPASS.
    */
   if (sscreen->info.gfx_level >= GFX9 &&
       res->b.b.flags & SI_RESOURCE_FLAG_GL2_BYPASS)
      res->flags |= RADEON_FLAG_GL2_BYPASS;

   if (res->b.b.flags & SI_RESOURCE_FLAG_DISCARDABLE &&
       sscreen->info.drm_major == 3 && sscreen->info.drm_minor >= 47) {
      /* Assume VRAM, so that we can use BIG_PAGE. */
      assert(res->domains == RADEON_DOMAIN_VRAM);
      res->flags |= RADEON_FLAG_DISCARDABLE;
   }

   if (res->domains == RADEON_DOMAIN_VRAM &&
       sscreen->options.mall_noalloc)
      res->flags |= RADEON_FLAG_MALL_NOALLOC;

   /* Set expected VRAM and GART usage for the buffer. */
   res->memory_usage_kb = MAX2(1, size / 1024);

   if (res->domains & RADEON_DOMAIN_VRAM) {
      /* We don't want to evict buffers from VRAM by mapping them for CPU access,
       * because they might never be moved back again. If a buffer is large enough,
       * upload data by copying from a temporary GTT buffer.
       */
      if (!sscreen->info.smart_access_memory &&
          sscreen->info.has_dedicated_vram &&
          !res->b.cpu_storage && /* TODO: The CPU storage breaks this. */
          size >= sscreen->options.max_vram_map_size)
         res->b.b.flags |= PIPE_RESOURCE_FLAG_DONT_MAP_DIRECTLY;
   }
}

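/* Allocate (or reallocate) the winsys buffer backing the resource according to the
 * fields set up by si_init_resource_fields, and reset per-buffer state such as the
 * valid range and TC L2 dirtiness.
 */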
bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
{
   struct pb_buffer *old_buf, *new_buf;

   /* Allocate a new resource. */
   new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size, 1 << res->bo_alignment_log2,
                                        res->domains, res->flags);
   if (!new_buf) {
      return false;
   }

   /* Replace the pointer such that if res->buf wasn't NULL, it won't be
    * NULL. This should prevent crashes with multiple contexts using
    * the same buffer where one of the contexts invalidates it while
    * the others are using it. */
   old_buf = res->buf;
   res->buf = new_buf; /* should be atomic */
   res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

   if (res->flags & RADEON_FLAG_32BIT) {
      uint64_t start = res->gpu_address;
      uint64_t last = start + res->bo_size - 1;
      (void)start;
      (void)last;

      assert((start >> 32) == sscreen->info.address32_hi);
      assert((last >> 32) == sscreen->info.address32_hi);
   }

   radeon_bo_reference(sscreen->ws, &old_buf, NULL);

   util_range_set_empty(&res->valid_buffer_range);
   res->TC_L2_dirty = false;

   /* Print debug information. */
   if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
      fprintf(stderr, "VM start=0x%" PRIX64 "  end=0x%" PRIX64 " | Buffer %" PRIu64 " bytes\n",
              res->gpu_address, res->gpu_address + res->buf->size, res->buf->size);
   }

   if (res->b.b.flags & SI_RESOURCE_FLAG_CLEAR)
      si_screen_clear_buffer(sscreen, &res->b.b, 0, res->bo_size, 0, SI_OP_SYNC_AFTER);

   return true;
}

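/* Screen callback: release the winsys BO and per-resource state. Buffers, auxiliary
 * planes, and textures are torn down through separate paths.
 */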
static void si_resource_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
   if (buf->target == PIPE_BUFFER) {
      struct si_screen *sscreen = (struct si_screen *)screen;
      struct si_resource *buffer = si_resource(buf);

      threaded_resource_deinit(buf);
      util_range_destroy(&buffer->valid_buffer_range);
      radeon_bo_reference(((struct si_screen*)screen)->ws, &buffer->buf, NULL);
      util_idalloc_mt_free(&sscreen->buffer_ids, buffer->b.buffer_id_unique);
      FREE_CL(buffer);
   } else if (buf->flags & SI_RESOURCE_AUX_PLANE) {
      struct si_auxiliary_texture *tex = (struct si_auxiliary_texture *)buf;

      radeon_bo_reference(((struct si_screen*)screen)->ws, &tex->buffer, NULL);
      FREE_CL(tex);
   } else {
      struct si_texture *tex = (struct si_texture *)buf;
      struct si_resource *resource = &tex->buffer;

      si_texture_reference(&tex->flushed_depth_texture, NULL);

      if (tex->cmask_buffer != &tex->buffer) {
         si_resource_reference(&tex->cmask_buffer, NULL);
      }
      radeon_bo_reference(((struct si_screen*)screen)->ws, &resource->buf, NULL);
      FREE_CL(tex);
   }
}

/* Reallocate the buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents.
 */
static bool si_invalidate_buffer(struct si_context *sctx, struct si_resource *buf)
{
   /* Shared buffers can't be reallocated. */
   if (buf->b.is_shared)
      return false;

   /* Sparse buffers can't be reallocated. */
   if (buf->flags & RADEON_FLAG_SPARSE)
      return false;

   /* In AMD_pinned_memory, the user pointer association only gets
    * broken when the buffer is explicitly re-allocated.
    */
   if (buf->b.is_user_ptr)
      return false;

   /* Check if mapping this buffer would cause waiting for the GPU. */
   if (si_cs_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, RADEON_USAGE_READWRITE)) {
      /* Reallocate the buffer in the same pipe_resource. */
      si_alloc_resource(sctx->screen, buf);
      si_rebind_buffer(sctx, &buf->b.b);
   } else {
      util_range_set_empty(&buf->valid_buffer_range);
   }

   return true;
}

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx, struct pipe_resource *dst,
                               struct pipe_resource *src, unsigned num_rebinds, uint32_t rebind_mask,
                               uint32_t delete_buffer_id)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *sdst = si_resource(dst);
   struct si_resource *ssrc = si_resource(src);

   radeon_bo_reference(sctx->screen->ws, &sdst->buf, ssrc->buf);
   sdst->gpu_address = ssrc->gpu_address;
   sdst->b.b.bind = ssrc->b.b.bind;
   sdst->flags = ssrc->flags;

   assert(sdst->memory_usage_kb == ssrc->memory_usage_kb);
   assert(sdst->bo_size == ssrc->bo_size);
   assert(sdst->bo_alignment_log2 == ssrc->bo_alignment_log2);
   assert(sdst->domains == ssrc->domains);

   si_rebind_buffer(sctx, dst);

   util_idalloc_mt_free(&sctx->screen->buffer_ids, delete_buffer_id);
}

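/* pipe_context::invalidate_resource: for buffers, discard the current contents by
 * reallocating the backing storage (see si_invalidate_buffer).
 */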
static void si_invalidate_resource(struct pipe_context *ctx, struct pipe_resource *resource)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *buf = si_resource(resource);

   /* We currently only do anything here for buffers. */
   if (resource->target == PIPE_BUFFER)
      (void)si_invalidate_buffer(sctx, buf);
}

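/* Allocate a si_transfer object from the appropriate pool, fill in the common
 * transfer fields, and return the CPU pointer the caller already obtained.
 */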
static void *si_buffer_get_transfer(struct pipe_context *ctx, struct pipe_resource *resource,
                                    unsigned usage, const struct pipe_box *box,
                                    struct pipe_transfer **ptransfer, void *data,
                                    struct si_resource *staging, unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *transfer;

   if (usage & PIPE_MAP_THREAD_SAFE)
      transfer = calloc(1, sizeof(*transfer));
   else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
      transfer = slab_zalloc(&sctx->pool_transfers_unsync);
   else
      transfer = slab_zalloc(&sctx->pool_transfers);

   pipe_resource_reference(&transfer->b.b.resource, resource);
   transfer->b.b.usage = usage;
   transfer->b.b.box = *box;
   transfer->b.b.offset = offset;
   transfer->staging = staging;
   *ptransfer = &transfer->b.b;
   return data;
}

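/* pipe_context::buffer_map: map a buffer range for CPU access. Depending on the usage
 * flags and where the buffer lives, this maps the buffer directly, invalidates
 * (reallocates) it, writes through a temporary upload buffer, or reads through a
 * cached GTT staging copy.
 */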
static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resource *resource,
                                    unsigned level, unsigned usage, const struct pipe_box *box,
                                    struct pipe_transfer **ptransfer)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *buf = si_resource(resource);
   uint8_t *data;

   assert(resource->target == PIPE_BUFFER);
   assert(box->x + box->width <= resource->width0);

   /* From GL_AMD_pinned_memory issues:
    *
    *     4) Is glMapBuffer on a shared buffer guaranteed to return the
    *        same system address which was specified at creation time?
    *
    *        RESOLVED: NO. The GL implementation might return a different
    *        virtual mapping of that memory, although the same physical
    *        page will be used.
    *
    * So don't ever use staging buffers.
    */
   if (buf->b.is_user_ptr)
      usage |= PIPE_MAP_PERSISTENT;
   if (usage & PIPE_MAP_ONCE)
      usage |= RADEON_MAP_TEMPORARY;

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
       usage & PIPE_MAP_WRITE && !buf->b.is_shared &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
      usage |= PIPE_MAP_UNSYNCHRONIZED;
   }

   /* If discarding the entire range, discard the whole resource instead. */
   if (usage & PIPE_MAP_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
      usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }

   /* If a buffer in VRAM is too large and the range is discarded, don't
    * map it directly. This makes sure that the buffer stays in VRAM.
    */
   bool force_discard_range = false;
   if (usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE) &&
       !(usage & PIPE_MAP_PERSISTENT) &&
       buf->b.b.flags & PIPE_RESOURCE_FLAG_DONT_MAP_DIRECTLY) {
      usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_UNSYNCHRONIZED);
      usage |= PIPE_MAP_DISCARD_RANGE;
      force_discard_range = true;
   }

   if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
       !(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
      assert(usage & PIPE_MAP_WRITE);

      if (si_invalidate_buffer(sctx, buf)) {
         /* At this point, the buffer is always idle. */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      } else {
         /* Fall back to a temporary buffer. */
         usage |= PIPE_MAP_DISCARD_RANGE;
      }
   }

   if (usage & PIPE_MAP_DISCARD_RANGE &&
       ((!(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT))) ||
        (buf->flags & RADEON_FLAG_SPARSE))) {
      assert(usage & PIPE_MAP_WRITE);

      /* Check if mapping this buffer would cause waiting for the GPU.
       */
      if (buf->flags & (RADEON_FLAG_SPARSE | RADEON_FLAG_NO_CPU_ACCESS) ||
          force_discard_range ||
          si_cs_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
          !sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, RADEON_USAGE_READWRITE)) {
         /* Do a wait-free write-only transfer using a temporary buffer. */
         struct u_upload_mgr *uploader;
         struct si_resource *staging = NULL;
         unsigned offset;

         /* If we are not called from the driver thread, we have
          * to use the uploader from u_threaded_context, which is
          * local to the calling thread.
          */
         if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
            uploader = sctx->tc->base.stream_uploader;
         else
            uploader = sctx->b.stream_uploader;

         u_upload_alloc(uploader, 0, box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
                        sctx->screen->info.tcc_cache_line_size, &offset,
                        (struct pipe_resource **)&staging, (void **)&data);

         if (staging) {
            data += box->x % SI_MAP_BUFFER_ALIGNMENT;
            return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, staging,
                                          offset);
         } else if (buf->flags & RADEON_FLAG_SPARSE) {
            return NULL;
         }
      } else {
         /* At this point, the buffer is always idle (we checked it above). */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      }
   }
   /* Use a staging buffer in cached GTT for reads. */
   else if (((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) &&
             (buf->domains & RADEON_DOMAIN_VRAM || buf->flags & RADEON_FLAG_GTT_WC)) ||
            (buf->flags & (RADEON_FLAG_SPARSE | RADEON_FLAG_NO_CPU_ACCESS))) {
      struct si_resource *staging;

      assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
      staging = si_aligned_buffer_create(ctx->screen,
                                         SI_RESOURCE_FLAG_GL2_BYPASS | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                         PIPE_USAGE_STAGING,
                                         box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT), 256);
      if (staging) {
         /* Copy the VRAM buffer to the staging buffer. */
         si_copy_buffer(sctx, &staging->b.b, resource, box->x % SI_MAP_BUFFER_ALIGNMENT,
                        box->x, box->width, SI_OP_SYNC_BEFORE_AFTER);

         data = si_buffer_map(sctx, staging, usage & ~PIPE_MAP_UNSYNCHRONIZED);
         if (!data) {
            si_resource_reference(&staging, NULL);
            return NULL;
         }
         data += box->x % SI_MAP_BUFFER_ALIGNMENT;

         return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, staging, 0);
      } else if (buf->flags & RADEON_FLAG_SPARSE) {
         return NULL;
      }
   }

   data = si_buffer_map(sctx, buf, usage);
   if (!data) {
      return NULL;
   }
   data += box->x;

   return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, NULL, 0);
}

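/* If the transfer used a staging buffer, copy the written range back into the real
 * buffer; in all cases mark the range as valid (initialized).
 */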
static void si_buffer_do_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
                                      const struct pipe_box *box)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *stransfer = (struct si_transfer *)transfer;
   struct si_resource *buf = si_resource(transfer->resource);

   if (stransfer->staging) {
      unsigned src_offset =
         stransfer->b.b.offset + transfer->box.x % SI_MAP_BUFFER_ALIGNMENT + (box->x - transfer->box.x);

      /* Copy the staging buffer into the original one. */
      si_copy_buffer(sctx, transfer->resource, &stransfer->staging->b.b, box->x, src_offset,
                     box->width, SI_OP_SYNC_BEFORE_AFTER);
   }

   util_range_add(&buf->b.b, &buf->valid_buffer_range, box->x, box->x + box->width);
}

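/* pipe_context::transfer_flush_region for buffers: only acts on mappings created with
 * both PIPE_MAP_WRITE and PIPE_MAP_FLUSH_EXPLICIT.
 */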
static void si_buffer_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
                                   const struct pipe_box *rel_box)
{
   unsigned required_usage = PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT;

   if ((transfer->usage & required_usage) == required_usage) {
      struct pipe_box box;

      u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
      si_buffer_do_flush_region(ctx, transfer, &box);
   }
}

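/* pipe_context::buffer_unmap: flush any implicit writes, unmap temporary mappings,
 * release the staging buffer, and free the transfer object.
 */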
static void si_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *transfer)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *stransfer = (struct si_transfer *)transfer;

   if (transfer->usage & PIPE_MAP_WRITE && !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
      si_buffer_do_flush_region(ctx, transfer, &transfer->box);

   if (transfer->usage & (PIPE_MAP_ONCE | RADEON_MAP_TEMPORARY) &&
       !stransfer->staging)
      sctx->ws->buffer_unmap(sctx->ws, si_resource(stransfer->b.b.resource)->buf);

   si_resource_reference(&stransfer->staging, NULL);
   assert(stransfer->b.staging == NULL); /* for threaded context only */
   pipe_resource_reference(&transfer->resource, NULL);

   if (transfer->usage & PIPE_MAP_THREAD_SAFE) {
      free(transfer);
   } else {
      /* Don't use pool_transfers_unsync. We are always in the driver
       * thread. Freeing an object into a different pool is allowed.
       */
      slab_free(&sctx->pool_transfers, transfer);
   }
}

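/* pipe_context::buffer_subdata: upload user data into a buffer range by mapping it for
 * write (with DISCARD_RANGE unless a direct map is requested) and copying with memcpy.
 */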
static void si_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *buffer,
                              unsigned usage, unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *transfer = NULL;
   struct pipe_box box;
   uint8_t *map = NULL;

   usage |= PIPE_MAP_WRITE;

   if (!(usage & PIPE_MAP_DIRECTLY))
      usage |= PIPE_MAP_DISCARD_RANGE;

   u_box_1d(offset, size, &box);
   map = si_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   si_buffer_transfer_unmap(ctx, transfer);
}

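/* Allocate and initialize the CPU-side si_resource struct for a buffer; the winsys BO
 * itself is allocated separately (see si_alloc_resource).
 */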
static struct si_resource *si_alloc_buffer_struct(struct pipe_screen *screen,
                                                  const struct pipe_resource *templ,
                                                  bool allow_cpu_storage)
{
   struct si_resource *buf = MALLOC_STRUCT_CL(si_resource);

   buf->b.b = *templ;
   buf->b.b.next = NULL;
   pipe_reference_init(&buf->b.b.reference, 1);
   buf->b.b.screen = screen;

   threaded_resource_init(&buf->b.b, allow_cpu_storage);

   buf->buf = NULL;
   buf->bind_history = 0;
   buf->TC_L2_dirty = false;
   util_range_init(&buf->valid_buffer_range);
   return buf;
}

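/* Create a buffer resource: allocate the struct, choose domains and flags, assign a
 * unique buffer ID, and allocate the winsys BO.
 */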
static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
                                              const struct pipe_resource *templ, unsigned alignment)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct si_resource *buf =
      si_alloc_buffer_struct(screen, templ,
                             templ->width0 <= sscreen->options.tc_max_cpu_storage_size);

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      buf->b.b.flags |= PIPE_RESOURCE_FLAG_UNMAPPABLE;

   si_init_resource_fields(sscreen, buf, templ->width0, alignment);

   buf->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);

   if (!si_alloc_resource(sscreen, buf)) {
      si_resource_destroy(screen, &buf->b.b);
      return NULL;
   }

   return &buf->b.b;
}

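/* Helper for creating an internal buffer with an explicit alignment from a minimal
 * pipe_resource template.
 */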
struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen, unsigned flags,
                                                 unsigned usage, unsigned size, unsigned alignment)
{
   struct pipe_resource buffer;

   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = 0;
   buffer.usage = usage;
   buffer.flags = flags;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return si_buffer_create(screen, &buffer, alignment);
}

struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen, unsigned flags,
                                             unsigned usage, unsigned size, unsigned alignment)
{
   return si_resource(pipe_aligned_buffer_create(screen, flags, usage, size, alignment));
}

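/* pipe_screen::resource_from_user_memory: wrap an application-provided pointer as a
 * GTT buffer (used for AMD_pinned_memory). The whole range is considered valid.
 */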
static struct pipe_resource *si_buffer_from_user_memory(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        void *user_memory)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_resource *buf = si_alloc_buffer_struct(screen, templ, false);

   buf->domains = RADEON_DOMAIN_GTT;
   buf->flags = 0;
   buf->b.is_user_ptr = true;
   util_range_add(&buf->b.b, &buf->valid_buffer_range, 0, templ->width0);
   util_range_add(&buf->b.b, &buf->b.valid_buffer_range, 0, templ->width0);

   buf->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);

   /* Convert a user pointer to a buffer. */
   buf->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0, 0);
   if (!buf->buf) {
      si_resource_destroy(screen, &buf->b.b);
      return NULL;
   }

   buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);
   buf->memory_usage_kb = templ->width0 / 1024;
   return &buf->b.b;
}

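/* Wrap an existing winsys buffer (e.g. an imported BO) as a pipe buffer at the given
 * offset, deducing the domains, flags, and pipe usage from the BO.
 */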
struct pipe_resource *si_buffer_from_winsys_buffer(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ,
                                                   struct pb_buffer *imported_buf,
                                                   uint64_t offset)
{
   if (offset + templ->width0 > imported_buf->size)
      return NULL;

   struct si_screen *sscreen = (struct si_screen *)screen;
   struct si_resource *res = si_alloc_buffer_struct(screen, templ, false);

   if (!res)
      return NULL;

   enum radeon_bo_domain domains = sscreen->ws->buffer_get_initial_domain(imported_buf);

   /* Get or guess the BO flags. */
   unsigned flags = RADEON_FLAG_NO_SUBALLOC;

   if (sscreen->ws->buffer_get_flags)
      res->flags |= sscreen->ws->buffer_get_flags(imported_buf);
   else
      flags |= RADEON_FLAG_GTT_WC; /* unknown flags, guess them */

   /* Deduce the usage. */
   switch (domains) {
   case RADEON_DOMAIN_VRAM:
   case RADEON_DOMAIN_VRAM_GTT:
      res->b.b.usage = PIPE_USAGE_DEFAULT;
      break;

   default:
      /* Other values are interpreted as GTT. */
      domains = RADEON_DOMAIN_GTT;

      if (flags & RADEON_FLAG_GTT_WC)
         res->b.b.usage = PIPE_USAGE_STREAM;
      else
         res->b.b.usage = PIPE_USAGE_STAGING;
   }

   si_init_resource_fields(sscreen, res, imported_buf->size,
                           1 << imported_buf->alignment_log2);

   res->b.is_shared = true;
   res->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);
   res->buf = imported_buf;
   res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf) + offset;
   res->domains = domains;
   res->flags = flags;

   if (res->flags & RADEON_FLAG_NO_CPU_ACCESS)
      res->b.b.flags |= PIPE_RESOURCE_FLAG_UNMAPPABLE;

   util_range_add(&res->b.b, &res->valid_buffer_range, 0, templ->width0);
   util_range_add(&res->b.b, &res->b.valid_buffer_range, 0, templ->width0);

   return &res->b.b;
}

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
                                                const struct pipe_resource *templ)
{
   if (templ->target == PIPE_BUFFER) {
      return si_buffer_create(screen, templ, 256);
   } else {
      return si_texture_create(screen, templ);
   }
}

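/* Commit or decommit memory for a range of a sparse buffer. */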
static bool si_buffer_commit(struct si_context *ctx, struct si_resource *res,
                             struct pipe_box *box, bool commit)
{
   return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}

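/* pipe_context::resource_commit: flush and sync the gfx CS if it references the
 * resource, then (de)commit pages of the sparse buffer or texture.
 */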
static bool si_resource_commit(struct pipe_context *pctx, struct pipe_resource *resource,
                               unsigned level, struct pipe_box *box, bool commit)
{
   struct si_context *ctx = (struct si_context *)pctx;
   struct si_resource *res = si_resource(resource);

   /*
    * Since buffer commitment changes cannot be pipelined, we need to
    * (a) flush any pending commands that refer to the buffer we're about
    *     to change, and
    * (b) wait for threaded submit to finish, including those that were
    *     triggered by some other, earlier operation.
    */
   if (radeon_emitted(&ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
       ctx->ws->cs_is_buffer_referenced(&ctx->gfx_cs, res->buf, RADEON_USAGE_READWRITE)) {
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
   }
   ctx->ws->cs_sync_flush(&ctx->gfx_cs);

   if (resource->target == PIPE_BUFFER)
      return si_buffer_commit(ctx, res, box, commit);
   else
      return si_texture_commit(ctx, res, level, box, commit);
}

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
   sscreen->b.resource_create = si_resource_create;
   sscreen->b.resource_destroy = si_resource_destroy;
   sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
   sctx->b.invalidate_resource = si_invalidate_resource;
   sctx->b.buffer_map = si_buffer_transfer_map;
   sctx->b.transfer_flush_region = si_buffer_flush_region;
   sctx->b.buffer_unmap = si_buffer_transfer_unmap;
   sctx->b.texture_subdata = u_default_texture_subdata;
   sctx->b.buffer_subdata = si_buffer_subdata;
   sctx->b.resource_commit = si_resource_commit;
}