1 /*
2  * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3  * Copyright © 2015 Advanced Micro Devices, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17  * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18  * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * The above copyright notice and this permission notice (including the
24  * next paragraph) shall be included in all copies or substantial portions
25  * of the Software.
26  */
27 
28 #include "amdgpu_cs.h"
29 
30 #include "util/hash_table.h"
31 #include "util/os_time.h"
32 #include "util/u_hash_table.h"
33 #include "frontend/drm_driver.h"
34 #include "drm-uapi/amdgpu_drm.h"
35 #include <xf86drm.h>
36 #include <stdio.h>
37 #include <inttypes.h>
38 
39 #ifndef AMDGPU_VA_RANGE_HIGH
40 #define AMDGPU_VA_RANGE_HIGH	0x2
41 #endif
42 
43 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
44 #define DEBUG_SPARSE_COMMITS 0
45 
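/* A run of free pages [begin, end) inside a sparse backing buffer. Each
 * backing buffer keeps a sorted array of these; see sparse_backing_alloc()
 * and sparse_backing_free() below.
 */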
46 struct amdgpu_sparse_backing_chunk {
47    uint32_t begin, end;
48 };
49 
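/* Return true if the buffer is idle, waiting up to the given timeout for it
 * to become idle. A zero timeout only polls; idle fences are dropped from
 * the fence list as they are found. Shared buffers go through
 * amdgpu_bo_wait_for_idle() because user fences are local to this process.
 */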
50 static bool amdgpu_bo_wait(struct radeon_winsys *rws,
51                            struct pb_buffer *_buf, uint64_t timeout,
52                            unsigned usage)
53 {
54    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
55    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
56    int64_t abs_timeout = 0;
57 
58    if (timeout == 0) {
59       if (p_atomic_read(&bo->num_active_ioctls))
60          return false;
61 
62    } else {
63       abs_timeout = os_time_get_absolute_timeout(timeout);
64 
65       /* Wait if any ioctl is being submitted with this buffer. */
66       if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
67          return false;
68    }
69 
70    if (bo->bo && bo->u.real.is_shared) {
71       /* We can't use user fences for shared buffers, because user fences
72        * are local to this process only. If we want to wait for all buffer
73        * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
74        */
75       bool buffer_busy = true;
76       int r;
77 
78       r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
79       if (r)
80          fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
81                  r);
82       return !buffer_busy;
83    }
84 
85    if (timeout == 0) {
86       unsigned idle_fences;
87       bool buffer_idle;
88 
89       simple_mtx_lock(&ws->bo_fence_lock);
90 
91       for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
92          if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
93             break;
94       }
95 
96       /* Release the idle fences to avoid checking them again later. */
97       for (unsigned i = 0; i < idle_fences; ++i)
98          amdgpu_fence_reference(&bo->fences[i], NULL);
99 
100       memmove(&bo->fences[0], &bo->fences[idle_fences],
101               (bo->num_fences - idle_fences) * sizeof(*bo->fences));
102       bo->num_fences -= idle_fences;
103 
104       buffer_idle = !bo->num_fences;
105       simple_mtx_unlock(&ws->bo_fence_lock);
106 
107       return buffer_idle;
108    } else {
109       bool buffer_idle = true;
110 
111       simple_mtx_lock(&ws->bo_fence_lock);
112       while (bo->num_fences && buffer_idle) {
113          struct pipe_fence_handle *fence = NULL;
114          bool fence_idle = false;
115 
116          amdgpu_fence_reference(&fence, bo->fences[0]);
117 
118          /* Wait for the fence. */
119          simple_mtx_unlock(&ws->bo_fence_lock);
120          if (amdgpu_fence_wait(fence, abs_timeout, true))
121             fence_idle = true;
122          else
123             buffer_idle = false;
124          simple_mtx_lock(&ws->bo_fence_lock);
125 
126          /* Release an idle fence to avoid checking it again later, keeping in
127           * mind that the fence array may have been modified by other threads.
128           */
129          if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
130             amdgpu_fence_reference(&bo->fences[0], NULL);
131             memmove(&bo->fences[0], &bo->fences[1],
132                     (bo->num_fences - 1) * sizeof(*bo->fences));
133             bo->num_fences--;
134          }
135 
136          amdgpu_fence_reference(&fence, NULL);
137       }
138       simple_mtx_unlock(&ws->bo_fence_lock);
139 
140       return buffer_idle;
141    }
142 }
143 
144 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
145       struct pb_buffer *buf)
146 {
147    return ((struct amdgpu_winsys_bo*)buf)->base.placement;
148 }
149 
150 static enum radeon_bo_flag amdgpu_bo_get_flags(
151       struct pb_buffer *buf)
152 {
153    return ((struct amdgpu_winsys_bo*)buf)->base.usage;
154 }
155 
156 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
157 {
158    for (unsigned i = 0; i < bo->num_fences; ++i)
159       amdgpu_fence_reference(&bo->fences[i], NULL);
160 
161    FREE(bo->fences);
162    bo->num_fences = 0;
163    bo->max_fences = 0;
164 }
165 
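/* Destroy a real (non-slab) BO: drop any persistent CPU mapping, close KMS
 * handles exported to other DRM file descriptions, remove the BO from the
 * export table, unmap and free its VA range, and release the kernel handle.
 */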
166 void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
167 {
168    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
169    struct amdgpu_screen_winsys *sws_iter;
170 
171    assert(bo->bo && "must not be called for slab entries");
172 
173    if (!bo->u.real.is_user_ptr && bo->u.real.cpu_ptr) {
174       bo->u.real.cpu_ptr = NULL;
175       amdgpu_bo_unmap(&ws->dummy_ws.base, &bo->base);
176    }
177    assert(bo->u.real.is_user_ptr || bo->u.real.map_count == 0);
178 
179 #if DEBUG
180    if (ws->debug_all_bos) {
181       simple_mtx_lock(&ws->global_bo_list_lock);
182       list_del(&bo->u.real.global_list_item);
183       ws->num_buffers--;
184       simple_mtx_unlock(&ws->global_bo_list_lock);
185    }
186 #endif
187 
188    /* Close all KMS handles retrieved for other DRM file descriptions */
189    simple_mtx_lock(&ws->sws_list_lock);
190    for (sws_iter = ws->sws_list; sws_iter; sws_iter = sws_iter->next) {
191       struct hash_entry *entry;
192 
193       if (!sws_iter->kms_handles)
194          continue;
195 
196       entry = _mesa_hash_table_search(sws_iter->kms_handles, bo);
197       if (entry) {
198          struct drm_gem_close args = { .handle = (uintptr_t)entry->data };
199 
200          drmIoctl(sws_iter->fd, DRM_IOCTL_GEM_CLOSE, &args);
201          _mesa_hash_table_remove(sws_iter->kms_handles, entry);
202       }
203    }
204    simple_mtx_unlock(&ws->sws_list_lock);
205 
206    simple_mtx_lock(&ws->bo_export_table_lock);
207    _mesa_hash_table_remove_key(ws->bo_export_table, bo->bo);
208    simple_mtx_unlock(&ws->bo_export_table_lock);
209 
210    if (bo->base.placement & RADEON_DOMAIN_VRAM_GTT) {
211       amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
212       amdgpu_va_range_free(bo->u.real.va_handle);
213    }
214    amdgpu_bo_free(bo->bo);
215 
216    amdgpu_bo_remove_fences(bo);
217 
218    if (bo->base.placement & RADEON_DOMAIN_VRAM)
219       ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
220    else if (bo->base.placement & RADEON_DOMAIN_GTT)
221       ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);
222 
223    simple_mtx_destroy(&bo->lock);
224    FREE(bo);
225 }
226 
227 static void amdgpu_bo_destroy_or_cache(struct radeon_winsys *rws, struct pb_buffer *_buf)
228 {
229    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
230    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
231 
232    assert(bo->bo); /* slab buffers have a separate vtbl */
233 
234    if (bo->u.real.use_reusable_pool)
235       pb_cache_add_buffer(bo->cache_entry);
236    else
237       amdgpu_bo_destroy(ws, _buf);
238 }
239 
240 static void amdgpu_clean_up_buffer_managers(struct amdgpu_winsys *ws)
241 {
242    for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++)
243       pb_slabs_reclaim(&ws->bo_slabs[i]);
244 
245    pb_cache_release_all_buffers(&ws->bo_cache);
246 }
247 
248 static bool amdgpu_bo_do_map(struct radeon_winsys *rws, struct amdgpu_winsys_bo *bo, void **cpu)
249 {
250    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
251 
252    assert(!(bo->base.usage & RADEON_FLAG_SPARSE) && bo->bo && !bo->u.real.is_user_ptr);
253    int r = amdgpu_bo_cpu_map(bo->bo, cpu);
254    if (r) {
255       /* Clean up buffer managers and try again. */
256       amdgpu_clean_up_buffer_managers(ws);
257       r = amdgpu_bo_cpu_map(bo->bo, cpu);
258       if (r)
259          return false;
260    }
261 
262    if (p_atomic_inc_return(&bo->u.real.map_count) == 1) {
263       if (bo->base.placement & RADEON_DOMAIN_VRAM)
264          ws->mapped_vram += bo->base.size;
265       else if (bo->base.placement & RADEON_DOMAIN_GTT)
266          ws->mapped_gtt += bo->base.size;
267       ws->num_mapped_buffers++;
268    }
269 
270    return true;
271 }
272 
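/* Map a buffer for CPU access. Unless PIPE_MAP_UNSYNCHRONIZED is set, this
 * flushes and/or waits for GPU work that conflicts with the requested access;
 * a read-only mapping only has to wait for GPU writes. Slab entries are
 * mapped through their parent (real) buffer at the proper offset.
 */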
273 void *amdgpu_bo_map(struct radeon_winsys *rws,
274                     struct pb_buffer *buf,
275                     struct radeon_cmdbuf *rcs,
276                     enum pipe_map_flags usage)
277 {
278    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
279    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
280    struct amdgpu_winsys_bo *real;
281    struct amdgpu_cs *cs = rcs ? amdgpu_cs(rcs) : NULL;
282 
283    assert(!(bo->base.usage & RADEON_FLAG_SPARSE));
284 
285    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
286    if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
287       /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
288       if (usage & PIPE_MAP_DONTBLOCK) {
289          if (!(usage & PIPE_MAP_WRITE)) {
290             /* Mapping for read.
291              *
292              * Since we are mapping for read, we don't need to wait
293              * if the GPU is using the buffer for read too
294              * (neither one is changing it).
295              *
296              * Only check whether the buffer is being used for write. */
297             if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
298                                                                RADEON_USAGE_WRITE)) {
299                cs->flush_cs(cs->flush_data,
300 			    RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
301                return NULL;
302             }
303 
304             if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
305                                 RADEON_USAGE_WRITE)) {
306                return NULL;
307             }
308          } else {
309             if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
310                cs->flush_cs(cs->flush_data,
311 			    RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
312                return NULL;
313             }
314 
315             if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
316                                 RADEON_USAGE_READWRITE)) {
317                return NULL;
318             }
319          }
320       } else {
321          uint64_t time = os_time_get_nano();
322 
323          if (!(usage & PIPE_MAP_WRITE)) {
324             /* Mapping for read.
325              *
326              * Since we are mapping for read, we don't need to wait
327              * if the GPU is using the buffer for read too
328              * (neither one is changing it).
329              *
330              * Only check whether the buffer is being used for write. */
331             if (cs) {
332                if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
333                                                             RADEON_USAGE_WRITE)) {
334                   cs->flush_cs(cs->flush_data,
335 			       RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
336                } else {
337                   /* Try to avoid busy-waiting in amdgpu_bo_wait. */
338                   if (p_atomic_read(&bo->num_active_ioctls))
339                      amdgpu_cs_sync_flush(rcs);
340                }
341             }
342 
343             amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
344                            RADEON_USAGE_WRITE);
345          } else {
346             /* Mapping for write. */
347             if (cs) {
348                if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
349                   cs->flush_cs(cs->flush_data,
350 			       RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
351                } else {
352                   /* Try to avoid busy-waiting in amdgpu_bo_wait. */
353                   if (p_atomic_read(&bo->num_active_ioctls))
354                      amdgpu_cs_sync_flush(rcs);
355                }
356             }
357 
358             amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
359                            RADEON_USAGE_READWRITE);
360          }
361 
362          ws->buffer_wait_time += os_time_get_nano() - time;
363       }
364    }
365 
366    /* Buffer synchronization has been checked, now actually map the buffer. */
367    void *cpu = NULL;
368    uint64_t offset = 0;
369 
370    if (bo->bo) {
371       real = bo;
372    } else {
373       real = bo->u.slab.real;
374       offset = bo->va - real->va;
375    }
376 
377    if (usage & RADEON_MAP_TEMPORARY) {
378       if (real->u.real.is_user_ptr) {
379          cpu = real->u.real.cpu_ptr;
380       } else {
381          if (!amdgpu_bo_do_map(rws, real, &cpu))
382             return NULL;
383       }
384    } else {
385       cpu = p_atomic_read(&real->u.real.cpu_ptr);
386       if (!cpu) {
387          simple_mtx_lock(&real->lock);
388          /* Must re-check due to the possibility of a race. Re-check need not
389           * be atomic thanks to the lock. */
390          cpu = real->u.real.cpu_ptr;
391          if (!cpu) {
392             if (!amdgpu_bo_do_map(rws, real, &cpu)) {
393                simple_mtx_unlock(&real->lock);
394                return NULL;
395             }
396             p_atomic_set(&real->u.real.cpu_ptr, cpu);
397          }
398          simple_mtx_unlock(&real->lock);
399       }
400    }
401 
402    return (uint8_t*)cpu + offset;
403 }
404 
405 void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf)
406 {
407    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
408    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
409    struct amdgpu_winsys_bo *real;
410 
411    assert(!(bo->base.usage & RADEON_FLAG_SPARSE));
412 
413    real = bo->bo ? bo : bo->u.slab.real;
414 
415    if (real->u.real.is_user_ptr)
416       return;
417 
418    assert(real->u.real.map_count != 0 && "too many unmaps");
419    if (p_atomic_dec_zero(&real->u.real.map_count)) {
420       assert(!real->u.real.cpu_ptr &&
421              "too many unmaps or forgot RADEON_MAP_TEMPORARY flag");
422 
423       if (real->base.placement & RADEON_DOMAIN_VRAM)
424          ws->mapped_vram -= real->base.size;
425       else if (real->base.placement & RADEON_DOMAIN_GTT)
426          ws->mapped_gtt -= real->base.size;
427       ws->num_mapped_buffers--;
428    }
429 
430    amdgpu_bo_cpu_unmap(real->bo);
431 }
432 
433 static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
434    /* Cast to void* because one of the function parameters is a struct pointer instead of void*. */
435    (void*)amdgpu_bo_destroy_or_cache
436    /* other functions are never called */
437 };
438 
439 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo)
440 {
441 #if DEBUG
442    assert(bo->bo);
443 
444    if (ws->debug_all_bos) {
445       simple_mtx_lock(&ws->global_bo_list_lock);
446       list_addtail(&bo->u.real.global_list_item, &ws->global_bo_list);
447       ws->num_buffers++;
448       simple_mtx_unlock(&ws->global_bo_list_lock);
449    }
450 #endif
451 }
452 
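/* For example (assuming a 64 KB PTE fragment size), a 24 KB buffer gets at
 * least 16 KB alignment (the largest power of two not exceeding its size),
 * while buffers of 64 KB and up are aligned to the PTE fragment size.
 */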
453 static unsigned amdgpu_get_optimal_alignment(struct amdgpu_winsys *ws,
454                                              uint64_t size, unsigned alignment)
455 {
456    /* Increase the alignment for faster address translation and better memory
457     * access pattern.
458     */
459    if (size >= ws->info.pte_fragment_size) {
460       alignment = MAX2(alignment, ws->info.pte_fragment_size);
461    } else if (size) {
462       unsigned msb = util_last_bit(size);
463 
464       alignment = MAX2(alignment, 1u << (msb - 1));
465    }
466    return alignment;
467 }
468 
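/* Allocate a real BO: a kernel allocation, plus a GPU VA range and VA mapping
 * for VRAM/GTT placements. On failure, each step is unwound through the error
 * labels at the bottom of the function.
 */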
469 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
470                                                  uint64_t size,
471                                                  unsigned alignment,
472                                                  enum radeon_bo_domain initial_domain,
473                                                  unsigned flags,
474                                                  int heap)
475 {
476    struct amdgpu_bo_alloc_request request = {0};
477    amdgpu_bo_handle buf_handle;
478    uint64_t va = 0;
479    struct amdgpu_winsys_bo *bo;
480    amdgpu_va_handle va_handle = NULL;
481    int r;
482    bool init_pb_cache;
483 
484    /* VRAM or GTT must be specified, but not both at the same time. */
485    assert(util_bitcount(initial_domain & (RADEON_DOMAIN_VRAM_GTT |
486                                           RADEON_DOMAIN_GDS |
487                                           RADEON_DOMAIN_OA)) == 1);
488 
489    alignment = amdgpu_get_optimal_alignment(ws, size, alignment);
490 
491    init_pb_cache = heap >= 0 && (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING);
492 
493    bo = CALLOC(1, sizeof(struct amdgpu_winsys_bo) +
494                   init_pb_cache * sizeof(struct pb_cache_entry));
495    if (!bo) {
496       return NULL;
497    }
498 
499    if (init_pb_cache) {
500       bo->u.real.use_reusable_pool = true;
501       pb_cache_init_entry(&ws->bo_cache, bo->cache_entry, &bo->base,
502                           heap);
503    }
504    request.alloc_size = size;
505    request.phys_alignment = alignment;
506 
507    if (initial_domain & RADEON_DOMAIN_VRAM) {
508       request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
509 
510       /* Since VRAM and GTT have almost the same performance on APUs, we could
511        * just set GTT. However, in order to decrease GTT(RAM) usage, which is
512        * shared with the OS, allow VRAM placements too. The idea is not to use
513        * VRAM usefully, but to use it so that it's not unused and wasted.
514        */
515       if (!ws->info.has_dedicated_vram)
516          request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
517    }
518 
519    if (initial_domain & RADEON_DOMAIN_GTT)
520       request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
521    if (initial_domain & RADEON_DOMAIN_GDS)
522       request.preferred_heap |= AMDGPU_GEM_DOMAIN_GDS;
523    if (initial_domain & RADEON_DOMAIN_OA)
524       request.preferred_heap |= AMDGPU_GEM_DOMAIN_OA;
525 
526    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
527       request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
528    if (flags & RADEON_FLAG_GTT_WC)
529       request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
530 
531    if (flags & RADEON_FLAG_DISCARDABLE &&
532        ws->info.drm_minor >= 47)
533       request.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
534 
535    if (ws->zero_all_vram_allocs &&
536        (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
537       request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
538 
539    if ((flags & RADEON_FLAG_ENCRYPTED) &&
540        ws->info.has_tmz_support) {
541       request.flags |= AMDGPU_GEM_CREATE_ENCRYPTED;
542 
543       if (!(flags & RADEON_FLAG_DRIVER_INTERNAL)) {
544          struct amdgpu_screen_winsys *sws_iter;
545          simple_mtx_lock(&ws->sws_list_lock);
546          for (sws_iter = ws->sws_list; sws_iter; sws_iter = sws_iter->next) {
547             *((bool*) &sws_iter->base.uses_secure_bos) = true;
548          }
549          simple_mtx_unlock(&ws->sws_list_lock);
550       }
551    }
552 
553    r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
554    if (r) {
555       fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
556       fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
557       fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
558       fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
559       fprintf(stderr, "amdgpu:    flags   : %" PRIx64 "\n", request.flags);
560       goto error_bo_alloc;
561    }
562 
563    if (initial_domain & RADEON_DOMAIN_VRAM_GTT) {
564       unsigned va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
565 
566       r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
567                                 size + va_gap_size, alignment,
568                                 0, &va, &va_handle,
569                                 (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
570                                 AMDGPU_VA_RANGE_HIGH);
571       if (r)
572          goto error_va_alloc;
573 
574       unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
575                           AMDGPU_VM_PAGE_EXECUTABLE;
576 
577       if (!(flags & RADEON_FLAG_READ_ONLY))
578          vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
579 
580       if (flags & RADEON_FLAG_GL2_BYPASS)
581          vm_flags |= AMDGPU_VM_MTYPE_UC;
582 
583       if (flags & RADEON_FLAG_MALL_NOALLOC &&
584           ws->info.drm_minor >= 47)
585          vm_flags |= AMDGPU_VM_PAGE_NOALLOC;
586 
587       r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
588 			   AMDGPU_VA_OP_MAP);
589       if (r)
590          goto error_va_map;
591    }
592 
593    simple_mtx_init(&bo->lock, mtx_plain);
594    pipe_reference_init(&bo->base.reference, 1);
595    bo->base.alignment_log2 = util_logbase2(alignment);
596    bo->base.size = size;
597    bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
598    bo->bo = buf_handle;
599    bo->va = va;
600    bo->u.real.va_handle = va_handle;
601    bo->base.placement = initial_domain;
602    bo->base.usage = flags;
603    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
604 
605    if (initial_domain & RADEON_DOMAIN_VRAM)
606       ws->allocated_vram += align64(size, ws->info.gart_page_size);
607    else if (initial_domain & RADEON_DOMAIN_GTT)
608       ws->allocated_gtt += align64(size, ws->info.gart_page_size);
609 
610    amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
611 
612    amdgpu_add_buffer_to_global_list(ws, bo);
613 
614    return bo;
615 
616 error_va_map:
617    amdgpu_va_range_free(va_handle);
618 
619 error_va_alloc:
620    amdgpu_bo_free(buf_handle);
621 
622 error_bo_alloc:
623    FREE(bo);
624    return NULL;
625 }
626 
627 bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
628 {
629    return amdgpu_bo_wait(&ws->dummy_ws.base, _buf, 0, RADEON_USAGE_READWRITE);
630 }
631 
632 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
633 {
634    struct amdgpu_winsys_bo *bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
635 
636    return amdgpu_bo_can_reclaim(priv, &bo->base);
637 }
638 
639 static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size)
640 {
641    /* Find the correct slab allocator for the given size. */
642    for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
643       struct pb_slabs *slabs = &ws->bo_slabs[i];
644 
645       if (size <= 1 << (slabs->min_order + slabs->num_orders - 1))
646          return slabs;
647    }
648 
649    assert(0);
650    return NULL;
651 }
652 
653 static unsigned get_slab_wasted_size(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo)
654 {
655    assert(bo->base.size <= bo->u.slab.entry.entry_size);
656    assert(bo->base.size < (1 << bo->base.alignment_log2) ||
657           bo->base.size < 1 << ws->bo_slabs[0].min_order ||
658           bo->base.size > bo->u.slab.entry.entry_size / 2);
659    return bo->u.slab.entry.entry_size - bo->base.size;
660 }
661 
662 static void amdgpu_bo_slab_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
663 {
664    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
665    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
666    struct pb_slabs *slabs;
667 
668    assert(!bo->bo);
669 
670    slabs = get_slabs(ws, bo->base.size);
671 
672    if (bo->base.placement & RADEON_DOMAIN_VRAM)
673       ws->slab_wasted_vram -= get_slab_wasted_size(ws, bo);
674    else
675       ws->slab_wasted_gtt -= get_slab_wasted_size(ws, bo);
676 
677    pb_slab_free(slabs, &bo->u.slab.entry);
678 }
679 
680 static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
681    /* Cast to void* because one of the function parameters is a struct pointer instead of void*. */
682    (void*)amdgpu_bo_slab_destroy
683    /* other functions are never called */
684 };
685 
686 /* Return the power of two size of a slab entry matching the input size. */
687 static unsigned get_slab_pot_entry_size(struct amdgpu_winsys *ws, unsigned size)
688 {
689    unsigned entry_size = util_next_power_of_two(size);
690    unsigned min_entry_size = 1 << ws->bo_slabs[0].min_order;
691 
692    return MAX2(entry_size, min_entry_size);
693 }
694 
695 /* Return the slab entry alignment. */
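/* For example, a 96 KB entry rounds up to a 128 KB power of two; since
 * 96 KB <= 3/4 * 128 KB, the entry is only guaranteed 128/4 = 32 KB
 * alignment.
 */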
696 static unsigned get_slab_entry_alignment(struct amdgpu_winsys *ws, unsigned size)
697 {
698    unsigned entry_size = get_slab_pot_entry_size(ws, size);
699 
700    if (size <= entry_size * 3 / 4)
701       return entry_size / 4;
702 
703    return entry_size;
704 }
705 
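/* pb_slabs callback: allocate one backing buffer via amdgpu_bo_create() and
 * carve it into entries of entry_size bytes that all reference the parent
 * buffer's storage and VA range.
 */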
706 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
707                                      unsigned group_index)
708 {
709    struct amdgpu_winsys *ws = priv;
710    struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
711    enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
712    enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
713    uint32_t base_id;
714    unsigned slab_size = 0;
715 
716    if (!slab)
717       return NULL;
718 
719    /* Determine the slab buffer size. */
720    for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
721       unsigned max_entry_size = 1 << (ws->bo_slabs[i].min_order + ws->bo_slabs[i].num_orders - 1);
722 
723       if (entry_size <= max_entry_size) {
724          /* The slab size is twice the size of the largest possible entry. */
725          slab_size = max_entry_size * 2;
726 
727          if (!util_is_power_of_two_nonzero(entry_size)) {
728             assert(util_is_power_of_two_nonzero(entry_size * 4 / 3));
729 
730             /* If the entry size is 3/4 of a power of two, we would waste space and not gain
731              * anything if we allocated only twice the power of two for the backing buffer:
732              *   2 * 3/4 = 1.5 usable with buffer size 2
733              *
734              * Allocating 5 times the entry size leads us to the next power of two and results
735              * in a much better memory utilization:
736              *   5 * 3/4 = 3.75 usable with buffer size 4
737              */
738             if (entry_size * 5 > slab_size)
739                slab_size = util_next_power_of_two(entry_size * 5);
740          }
741 
742          /* The largest slab should have the same size as the PTE fragment
743           * size to get faster address translation.
744           */
745          if (i == NUM_SLAB_ALLOCATORS - 1 &&
746              slab_size < ws->info.pte_fragment_size)
747             slab_size = ws->info.pte_fragment_size;
748          break;
749       }
750    }
751    assert(slab_size != 0);
752 
753    slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
754                                                     slab_size, slab_size,
755                                                     domains, flags));
756    if (!slab->buffer)
757       goto fail;
758 
759    slab_size = slab->buffer->base.size;
760 
761    slab->base.num_entries = slab_size / entry_size;
762    slab->base.num_free = slab->base.num_entries;
763    slab->entry_size = entry_size;
764    slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
765    if (!slab->entries)
766       goto fail_buffer;
767 
768    list_inithead(&slab->base.free);
769 
770    base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
771 
772    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
773       struct amdgpu_winsys_bo *bo = &slab->entries[i];
774 
775       simple_mtx_init(&bo->lock, mtx_plain);
776       bo->base.alignment_log2 = util_logbase2(get_slab_entry_alignment(ws, entry_size));
777       bo->base.size = entry_size;
778       bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
779       bo->va = slab->buffer->va + i * entry_size;
780       bo->base.placement = domains;
781       bo->unique_id = base_id + i;
782       bo->u.slab.entry.slab = &slab->base;
783       bo->u.slab.entry.group_index = group_index;
784       bo->u.slab.entry.entry_size = entry_size;
785 
786       if (slab->buffer->bo) {
787          /* The slab is not suballocated. */
788          bo->u.slab.real = slab->buffer;
789       } else {
790          /* The slab is allocated out of a bigger slab. */
791          bo->u.slab.real = slab->buffer->u.slab.real;
792          assert(bo->u.slab.real->bo);
793       }
794 
795       list_addtail(&bo->u.slab.entry.head, &slab->base.free);
796    }
797 
798    /* Wasted alignment due to slabs with 3/4 allocations being aligned to a power of two. */
799    assert(slab->base.num_entries * entry_size <= slab_size);
800    if (domains & RADEON_DOMAIN_VRAM)
801       ws->slab_wasted_vram += slab_size - slab->base.num_entries * entry_size;
802    else
803       ws->slab_wasted_gtt += slab_size - slab->base.num_entries * entry_size;
804 
805    return &slab->base;
806 
807 fail_buffer:
808    amdgpu_winsys_bo_reference(ws, &slab->buffer, NULL);
809 fail:
810    FREE(slab);
811    return NULL;
812 }
813 
814 void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *pslab)
815 {
816    struct amdgpu_slab *slab = amdgpu_slab(pslab);
817    unsigned slab_size = slab->buffer->base.size;
818 
819    assert(slab->base.num_entries * slab->entry_size <= slab_size);
820    if (slab->buffer->base.placement & RADEON_DOMAIN_VRAM)
821       ws->slab_wasted_vram -= slab_size - slab->base.num_entries * slab->entry_size;
822    else
823       ws->slab_wasted_gtt -= slab_size - slab->base.num_entries * slab->entry_size;
824 
825    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
826       amdgpu_bo_remove_fences(&slab->entries[i]);
827       simple_mtx_destroy(&slab->entries[i].lock);
828    }
829 
830    FREE(slab->entries);
831    amdgpu_winsys_bo_reference(ws, &slab->buffer, NULL);
832    FREE(slab);
833 }
834 
835 #if DEBUG_SPARSE_COMMITS
836 static void
837 sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
838 {
839    fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
840                    "Commitments:\n",
841            __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
842 
843    struct amdgpu_sparse_backing *span_backing = NULL;
844    uint32_t span_first_backing_page = 0;
845    uint32_t span_first_va_page = 0;
846    uint32_t va_page = 0;
847 
848    for (;;) {
849       struct amdgpu_sparse_backing *backing = 0;
850       uint32_t backing_page = 0;
851 
852       if (va_page < bo->u.sparse.num_va_pages) {
853          backing = bo->u.sparse.commitments[va_page].backing;
854          backing_page = bo->u.sparse.commitments[va_page].page;
855       }
856 
857       if (span_backing &&
858           (backing != span_backing ||
859            backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
860          fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
861                  span_first_va_page, va_page - 1, span_backing,
862                  span_first_backing_page,
863                  span_first_backing_page + (va_page - span_first_va_page) - 1);
864 
865          span_backing = NULL;
866       }
867 
868       if (va_page >= bo->u.sparse.num_va_pages)
869          break;
870 
871       if (backing && !span_backing) {
872          span_backing = backing;
873          span_first_backing_page = backing_page;
874          span_first_va_page = va_page;
875       }
876 
877       va_page++;
878    }
879 
880    fprintf(stderr, "Backing:\n");
881 
882    list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
883       fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
884       for (unsigned i = 0; i < backing->num_chunks; ++i)
885          fprintf(stderr, "   %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
886    }
887 }
888 #endif
889 
890 /*
891  * Attempt to allocate the given number of backing pages. Fewer pages may be
892  * allocated (depending on the fragmentation of existing backing buffers),
893  * which will be reflected by a change to *pnum_pages.
894  */
895 static struct amdgpu_sparse_backing *
896 sparse_backing_alloc(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo,
897                      uint32_t *pstart_page, uint32_t *pnum_pages)
898 {
899    struct amdgpu_sparse_backing *best_backing;
900    unsigned best_idx;
901    uint32_t best_num_pages;
902 
903    best_backing = NULL;
904    best_idx = 0;
905    best_num_pages = 0;
906 
907    /* This is a very simple and inefficient best-fit algorithm. */
908    list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
909       for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
910          uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
911          if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
912             (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
913             best_backing = backing;
914             best_idx = idx;
915             best_num_pages = cur_num_pages;
916          }
917       }
918    }
919 
920    /* Allocate a new backing buffer if necessary. */
921    if (!best_backing) {
922       struct pb_buffer *buf;
923       uint64_t size;
924       uint32_t pages;
925 
926       best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
927       if (!best_backing)
928          return NULL;
929 
930       best_backing->max_chunks = 4;
931       best_backing->chunks = CALLOC(best_backing->max_chunks,
932                                     sizeof(*best_backing->chunks));
933       if (!best_backing->chunks) {
934          FREE(best_backing);
935          return NULL;
936       }
937 
938       assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
939 
940       size = MIN3(bo->base.size / 16,
941                   8 * 1024 * 1024,
942                   bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
943       size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
944 
945       buf = amdgpu_bo_create(ws, size, RADEON_SPARSE_PAGE_SIZE,
946                              bo->base.placement,
947                              (bo->base.usage & ~RADEON_FLAG_SPARSE &
948                               /* Set the interprocess sharing flag to disable pb_cache because
949                                * amdgpu_bo_wait doesn't wait for active CS jobs.
950                                */
951                               ~RADEON_FLAG_NO_INTERPROCESS_SHARING) | RADEON_FLAG_NO_SUBALLOC);
952       if (!buf) {
953          FREE(best_backing->chunks);
954          FREE(best_backing);
955          return NULL;
956       }
957 
958       /* We might have gotten a bigger buffer than requested via caching. */
959       pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
960 
961       best_backing->bo = amdgpu_winsys_bo(buf);
962       best_backing->num_chunks = 1;
963       best_backing->chunks[0].begin = 0;
964       best_backing->chunks[0].end = pages;
965 
966       list_add(&best_backing->list, &bo->u.sparse.backing);
967       bo->u.sparse.num_backing_pages += pages;
968 
969       best_idx = 0;
970       best_num_pages = pages;
971    }
972 
973    *pnum_pages = MIN2(*pnum_pages, best_num_pages);
974    *pstart_page = best_backing->chunks[best_idx].begin;
975    best_backing->chunks[best_idx].begin += *pnum_pages;
976 
977    if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
978       memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
979               sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
980       best_backing->num_chunks--;
981    }
982 
983    return best_backing;
984 }
985 
986 static void
987 sparse_free_backing_buffer(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo,
988                            struct amdgpu_sparse_backing *backing)
989 {
990    bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
991 
992    simple_mtx_lock(&ws->bo_fence_lock);
993    amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
994    simple_mtx_unlock(&ws->bo_fence_lock);
995 
996    list_del(&backing->list);
997    amdgpu_winsys_bo_reference(ws, &backing->bo, NULL);
998    FREE(backing->chunks);
999    FREE(backing);
1000 }
1001 
1002 /*
1003  * Return a range of pages from the given backing buffer back into the
1004  * free structure.
1005  */
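/* Chunks are kept sorted by their first page: a binary search finds the
 * insertion point, adjacent free ranges are merged, and a backing buffer
 * that becomes entirely free is released.
 */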
1006 static bool
1007 sparse_backing_free(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *bo,
1008                     struct amdgpu_sparse_backing *backing,
1009                     uint32_t start_page, uint32_t num_pages)
1010 {
1011    uint32_t end_page = start_page + num_pages;
1012    unsigned low = 0;
1013    unsigned high = backing->num_chunks;
1014 
1015    /* Find the first chunk with begin >= start_page. */
1016    while (low < high) {
1017       unsigned mid = low + (high - low) / 2;
1018 
1019       if (backing->chunks[mid].begin >= start_page)
1020          high = mid;
1021       else
1022          low = mid + 1;
1023    }
1024 
1025    assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
1026    assert(low == 0 || backing->chunks[low - 1].end <= start_page);
1027 
1028    if (low > 0 && backing->chunks[low - 1].end == start_page) {
1029       backing->chunks[low - 1].end = end_page;
1030 
1031       if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
1032          backing->chunks[low - 1].end = backing->chunks[low].end;
1033          memmove(&backing->chunks[low], &backing->chunks[low + 1],
1034                  sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
1035          backing->num_chunks--;
1036       }
1037    } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
1038       backing->chunks[low].begin = start_page;
1039    } else {
1040       if (backing->num_chunks >= backing->max_chunks) {
1041          unsigned new_max_chunks = 2 * backing->max_chunks;
1042          struct amdgpu_sparse_backing_chunk *new_chunks =
1043             REALLOC(backing->chunks,
1044                     sizeof(*backing->chunks) * backing->max_chunks,
1045                     sizeof(*backing->chunks) * new_max_chunks);
1046          if (!new_chunks)
1047             return false;
1048 
1049          backing->max_chunks = new_max_chunks;
1050          backing->chunks = new_chunks;
1051       }
1052 
1053       memmove(&backing->chunks[low + 1], &backing->chunks[low],
1054               sizeof(*backing->chunks) * (backing->num_chunks - low));
1055       backing->chunks[low].begin = start_page;
1056       backing->chunks[low].end = end_page;
1057       backing->num_chunks++;
1058    }
1059 
1060    if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
1061        backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
1062       sparse_free_backing_buffer(ws, bo, backing);
1063 
1064    return true;
1065 }
1066 
1067 static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
1068 {
1069    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1070    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1071    int r;
1072 
1073    assert(!bo->bo && bo->base.usage & RADEON_FLAG_SPARSE);
1074 
1075    r = amdgpu_bo_va_op_raw(ws->dev, NULL, 0,
1076                            (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
1077                            bo->va, 0, AMDGPU_VA_OP_CLEAR);
1078    if (r) {
1079       fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
1080    }
1081 
1082    while (!list_is_empty(&bo->u.sparse.backing)) {
1083       sparse_free_backing_buffer(ws, bo,
1084                                  container_of(bo->u.sparse.backing.next,
1085                                               struct amdgpu_sparse_backing, list));
1086    }
1087 
1088    amdgpu_va_range_free(bo->u.sparse.va_handle);
1089    FREE(bo->u.sparse.commitments);
1090    simple_mtx_destroy(&bo->lock);
1091    FREE(bo);
1092 }
1093 
1094 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
1095    /* Cast to void* because one of the function parameters is a struct pointer instead of void*. */
1096    (void*)amdgpu_bo_sparse_destroy
1097    /* other functions are never called */
1098 };
1099 
1100 static struct pb_buffer *
1101 amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
1102                         enum radeon_bo_domain domain,
1103                         enum radeon_bo_flag flags)
1104 {
1105    struct amdgpu_winsys_bo *bo;
1106    uint64_t map_size;
1107    uint64_t va_gap_size;
1108    int r;
1109 
1110    /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
1111     * that exceed this limit. This is not really a restriction: we don't have
1112     * that much virtual address space anyway.
1113     */
1114    if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
1115       return NULL;
1116 
1117    bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1118    if (!bo)
1119       return NULL;
1120 
1121    simple_mtx_init(&bo->lock, mtx_plain);
1122    pipe_reference_init(&bo->base.reference, 1);
1123    bo->base.alignment_log2 = util_logbase2(RADEON_SPARSE_PAGE_SIZE);
1124    bo->base.size = size;
1125    bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
1126    bo->base.placement = domain;
1127    bo->unique_id =  __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1128    bo->base.usage = flags;
1129 
1130    bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
1131    bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
1132                                      sizeof(*bo->u.sparse.commitments));
1133    if (!bo->u.sparse.commitments)
1134       goto error_alloc_commitments;
1135 
1136    list_inithead(&bo->u.sparse.backing);
1137 
1138    /* For simplicity, we always map a multiple of the page size. */
1139    map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
1140    va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
1141    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1142                              map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
1143                              0, &bo->va, &bo->u.sparse.va_handle,
1144 			     AMDGPU_VA_RANGE_HIGH);
1145    if (r)
1146       goto error_va_alloc;
1147 
1148    r = amdgpu_bo_va_op_raw(ws->dev, NULL, 0, map_size, bo->va,
1149                            AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
1150    if (r)
1151       goto error_va_map;
1152 
1153    return &bo->base;
1154 
1155 error_va_map:
1156    amdgpu_va_range_free(bo->u.sparse.va_handle);
1157 error_va_alloc:
1158    FREE(bo->u.sparse.commitments);
1159 error_alloc_commitments:
1160    simple_mtx_destroy(&bo->lock);
1161    FREE(bo);
1162    return NULL;
1163 }
1164 
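/* Commit or uncommit a page-aligned range of a sparse buffer. Committing
 * maps the range to pages taken from (or added to) the backing buffers;
 * uncommitting replaces the range with the plain PRT mapping again and
 * returns the pages to the backing free lists.
 */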
1165 static bool
1166 amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
1167                         uint64_t offset, uint64_t size, bool commit)
1168 {
1169    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1170    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
1171    struct amdgpu_sparse_commitment *comm;
1172    uint32_t va_page, end_va_page;
1173    bool ok = true;
1174    int r;
1175 
1176    assert(bo->base.usage & RADEON_FLAG_SPARSE);
1177    assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
1178    assert(offset <= bo->base.size);
1179    assert(size <= bo->base.size - offset);
1180    assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
1181 
1182    comm = bo->u.sparse.commitments;
1183    va_page = offset / RADEON_SPARSE_PAGE_SIZE;
1184    end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
1185 
1186    simple_mtx_lock(&bo->lock);
1187 
1188 #if DEBUG_SPARSE_COMMITS
1189    sparse_dump(bo, __func__);
1190 #endif
1191 
1192    if (commit) {
1193       while (va_page < end_va_page) {
1194          uint32_t span_va_page;
1195 
1196          /* Skip pages that are already committed. */
1197          if (comm[va_page].backing) {
1198             va_page++;
1199             continue;
1200          }
1201 
1202          /* Determine length of uncommitted span. */
1203          span_va_page = va_page;
1204          while (va_page < end_va_page && !comm[va_page].backing)
1205             va_page++;
1206 
1207          /* Fill the uncommitted span with chunks of backing memory. */
1208          while (span_va_page < va_page) {
1209             struct amdgpu_sparse_backing *backing;
1210             uint32_t backing_start, backing_size;
1211 
1212             backing_size = va_page - span_va_page;
1213             backing = sparse_backing_alloc(ws, bo, &backing_start, &backing_size);
1214             if (!backing) {
1215                ok = false;
1216                goto out;
1217             }
1218 
1219             r = amdgpu_bo_va_op_raw(ws->dev, backing->bo->bo,
1220                                     (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
1221                                     (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
1222                                     bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
1223                                     AMDGPU_VM_PAGE_READABLE |
1224                                     AMDGPU_VM_PAGE_WRITEABLE |
1225                                     AMDGPU_VM_PAGE_EXECUTABLE,
1226                                     AMDGPU_VA_OP_REPLACE);
1227             if (r) {
1228                ok = sparse_backing_free(ws, bo, backing, backing_start, backing_size);
1229                assert(ok && "sufficient memory should already be allocated");
1230 
1231                ok = false;
1232                goto out;
1233             }
1234 
1235             while (backing_size) {
1236                comm[span_va_page].backing = backing;
1237                comm[span_va_page].page = backing_start;
1238                span_va_page++;
1239                backing_start++;
1240                backing_size--;
1241             }
1242          }
1243       }
1244    } else {
1245       r = amdgpu_bo_va_op_raw(ws->dev, NULL, 0,
1246                               (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
1247                               bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
1248                               AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
1249       if (r) {
1250          ok = false;
1251          goto out;
1252       }
1253 
1254       while (va_page < end_va_page) {
1255          struct amdgpu_sparse_backing *backing;
1256          uint32_t backing_start;
1257          uint32_t span_pages;
1258 
1259          /* Skip pages that are already uncommitted. */
1260          if (!comm[va_page].backing) {
1261             va_page++;
1262             continue;
1263          }
1264 
1265          /* Group contiguous spans of pages. */
1266          backing = comm[va_page].backing;
1267          backing_start = comm[va_page].page;
1268          comm[va_page].backing = NULL;
1269 
1270          span_pages = 1;
1271          va_page++;
1272 
1273          while (va_page < end_va_page &&
1274                 comm[va_page].backing == backing &&
1275                 comm[va_page].page == backing_start + span_pages) {
1276             comm[va_page].backing = NULL;
1277             va_page++;
1278             span_pages++;
1279          }
1280 
1281          if (!sparse_backing_free(ws, bo, backing, backing_start, span_pages)) {
1282             /* Couldn't allocate tracking data structures, so we have to leak */
1283             fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1284             ok = false;
1285          }
1286       }
1287    }
1288 out:
1289 
1290    simple_mtx_unlock(&bo->lock);
1291 
1292    return ok;
1293 }
1294 
1295 static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
1296                                        struct pb_buffer *_buf,
1297                                        struct radeon_bo_metadata *md,
1298                                        struct radeon_surf *surf)
1299 {
1300    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1301    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1302    struct amdgpu_bo_info info = {0};
1303    int r;
1304 
1305    assert(bo->bo && "must not be called for slab entries");
1306 
1307    r = amdgpu_bo_query_info(bo->bo, &info);
1308    if (r)
1309       return;
1310 
1311    ac_surface_set_bo_metadata(&ws->info, surf, info.metadata.tiling_info,
1312                               &md->mode);
1313 
1314    md->size_metadata = info.metadata.size_metadata;
1315    memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1316 }
1317 
1318 static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
1319                                        struct pb_buffer *_buf,
1320                                        struct radeon_bo_metadata *md,
1321                                        struct radeon_surf *surf)
1322 {
1323    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1324    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1325    struct amdgpu_bo_metadata metadata = {0};
1326 
1327    assert(bo->bo && "must not be called for slab entries");
1328 
1329    ac_surface_get_bo_metadata(&ws->info, surf, &metadata.tiling_info);
1330 
1331    metadata.size_metadata = md->size_metadata;
1332    memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1333 
1334    amdgpu_bo_set_metadata(bo->bo, &metadata);
1335 }
1336 
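/* Central allocation entry point: sparse buffers get their own path, small
 * requests are suballocated from slabs, and everything else goes through the
 * reusable cache (when allowed) or a fresh amdgpu_create_bo() call.
 */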
1337 struct pb_buffer *
1338 amdgpu_bo_create(struct amdgpu_winsys *ws,
1339                  uint64_t size,
1340                  unsigned alignment,
1341                  enum radeon_bo_domain domain,
1342                  enum radeon_bo_flag flags)
1343 {
1344    struct amdgpu_winsys_bo *bo;
1345 
1346    radeon_canonicalize_bo_flags(&domain, &flags);
1347 
1348    /* Handle sparse buffers first. */
1349    if (flags & RADEON_FLAG_SPARSE) {
1350       assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1351 
1352       return amdgpu_bo_sparse_create(ws, size, domain, flags);
1353    }
1354 
1355    struct pb_slabs *last_slab = &ws->bo_slabs[NUM_SLAB_ALLOCATORS - 1];
1356    unsigned max_slab_entry_size = 1 << (last_slab->min_order + last_slab->num_orders - 1);
1357    int heap = radeon_get_heap_index(domain, flags);
1358 
1359    /* Sub-allocate small buffers from slabs. */
1360    if (heap >= 0 && size <= max_slab_entry_size) {
1361       struct pb_slab_entry *entry;
1362       unsigned alloc_size = size;
1363 
1364       /* Always use slabs for sizes less than 4 KB because the kernel aligns
1365        * everything to 4 KB.
1366        */
1367       if (size < alignment && alignment <= 4 * 1024)
1368          alloc_size = alignment;
1369 
1370       if (alignment > get_slab_entry_alignment(ws, alloc_size)) {
1371          /* 3/4 allocations can return too small alignment. Try again with a power of two
1372           * allocation size.
1373           */
1374          unsigned pot_size = get_slab_pot_entry_size(ws, alloc_size);
1375 
1376          if (alignment <= pot_size) {
1377             /* This size works but wastes some memory to fulfil the alignment. */
1378             alloc_size = pot_size;
1379          } else {
1380             goto no_slab; /* can't fulfil alignment requirements */
1381          }
1382       }
1383 
1384       struct pb_slabs *slabs = get_slabs(ws, alloc_size);
1385       entry = pb_slab_alloc(slabs, alloc_size, heap);
1386       if (!entry) {
1387          /* Clean up buffer managers and try again. */
1388          amdgpu_clean_up_buffer_managers(ws);
1389 
1390          entry = pb_slab_alloc(slabs, alloc_size, heap);
1391       }
1392       if (!entry)
1393          return NULL;
1394 
1395       bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
1396       pipe_reference_init(&bo->base.reference, 1);
1397       bo->base.size = size;
1398       assert(alignment <= 1 << bo->base.alignment_log2);
1399 
1400       if (domain & RADEON_DOMAIN_VRAM)
1401          ws->slab_wasted_vram += get_slab_wasted_size(ws, bo);
1402       else
1403          ws->slab_wasted_gtt += get_slab_wasted_size(ws, bo);
1404 
1405       return &bo->base;
1406    }
1407 no_slab:
1408 
1409    /* Align the size to the page size. This is the minimum alignment for
1410     * normal BOs. Aligning here helps the cached bufmgr; small BOs such as
1411     * constant/uniform buffers especially benefit from better reuse.
1412     */
1413    if (domain & RADEON_DOMAIN_VRAM_GTT) {
1414       size = align64(size, ws->info.gart_page_size);
1415       alignment = align(alignment, ws->info.gart_page_size);
1416    }
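   /* Worked example (assuming a 4 KB GART page size): a 5000-byte request is
    * rounded up by align64(5000, 4096) to 8192 bytes, while a 4096-byte
    * request stays at 4096, so cached buffers can be reused across slightly
    * different request sizes.
    */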
1417 
1418    bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
1419                             !(flags & RADEON_FLAG_DISCARDABLE);
1420 
1421    if (use_reusable_pool) {
1422        /* RADEON_FLAG_NO_SUBALLOC is irrelevant for the cache. */
1423        heap = radeon_get_heap_index(domain, flags & ~RADEON_FLAG_NO_SUBALLOC);
1424        assert(heap >= 0 && heap < RADEON_NUM_HEAPS);
1425 
1426        /* Get a buffer from the cache. */
1427        bo = (struct amdgpu_winsys_bo*)
1428             pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
1429        if (bo)
1430           return &bo->base;
1431    }
1432 
1433    /* Create a new one. */
1434    bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1435    if (!bo) {
1436       /* Clean up buffer managers and try again. */
1437       amdgpu_clean_up_buffer_managers(ws);
1438 
1439       bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1440       if (!bo)
1441          return NULL;
1442    }
1443 
1444    return &bo->base;
1445 }
1446 
1447 static struct pb_buffer *
1448 amdgpu_buffer_create(struct radeon_winsys *ws,
1449                      uint64_t size,
1450                      unsigned alignment,
1451                      enum radeon_bo_domain domain,
1452                      enum radeon_bo_flag flags)
1453 {
1454    struct pb_buffer *res = amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
1455                                              flags);
1456    return res;
1457 }
1458 
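/* Illustrative only (the winsys pointer and sizes below are hypothetical):
 * a driver frontend would typically request a buffer through the vtable entry
 * wired up in amdgpu_bo_init_functions() rather than calling
 * amdgpu_bo_create() directly:
 *
 *    struct pb_buffer *buf =
 *       rws->buffer_create(rws, 64 * 1024, 4096, RADEON_DOMAIN_GTT,
 *                          RADEON_FLAG_NO_INTERPROCESS_SHARING);
 *    if (!buf)
 *       return false;   // allocation failed even after cache cleanup
 */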
1459 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1460                                                struct winsys_handle *whandle,
1461                                                unsigned vm_alignment,
1462                                                bool is_prime_linear_buffer)
1463 {
1464    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1465    struct amdgpu_winsys_bo *bo = NULL;
1466    enum amdgpu_bo_handle_type type;
1467    struct amdgpu_bo_import_result result = {0};
1468    uint64_t va;
1469    amdgpu_va_handle va_handle = NULL;
1470    struct amdgpu_bo_info info = {0};
1471    enum radeon_bo_domain initial = 0;
1472    enum radeon_bo_flag flags = 0;
1473    int r;
1474 
1475    switch (whandle->type) {
1476    case WINSYS_HANDLE_TYPE_SHARED:
1477       type = amdgpu_bo_handle_type_gem_flink_name;
1478       break;
1479    case WINSYS_HANDLE_TYPE_FD:
1480       type = amdgpu_bo_handle_type_dma_buf_fd;
1481       break;
1482    default:
1483       return NULL;
1484    }
1485 
1486    r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
1487    if (r)
1488       return NULL;
1489 
1490    simple_mtx_lock(&ws->bo_export_table_lock);
1491    bo = util_hash_table_get(ws->bo_export_table, result.buf_handle);
1492 
1493    /* If the amdgpu_winsys_bo instance already exists, bump the reference
1494     * counter and return it.
1495     */
1496    if (bo) {
1497       p_atomic_inc(&bo->base.reference.count);
1498       simple_mtx_unlock(&ws->bo_export_table_lock);
1499 
1500       /* Release the buffer handle, because we don't need it anymore.
1501        * This function is returning an existing buffer, which has its own
1502        * handle.
1503        */
1504       amdgpu_bo_free(result.buf_handle);
1505       return &bo->base;
1506    }
1507 
1508    /* Get initial domains. */
1509    r = amdgpu_bo_query_info(result.buf_handle, &info);
1510    if (r)
1511       goto error;
1512 
1513    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1514                              result.alloc_size,
1515                              amdgpu_get_optimal_alignment(ws, result.alloc_size,
1516                                                           vm_alignment),
1517                              0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
1518    if (r)
1519       goto error;
1520 
1521    bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1522    if (!bo)
1523       goto error;
1524 
1525    r = amdgpu_bo_va_op_raw(ws->dev, result.buf_handle, 0, result.alloc_size, va,
1526                            AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1527                            AMDGPU_VM_PAGE_EXECUTABLE |
1528                            (is_prime_linear_buffer ? AMDGPU_VM_MTYPE_UC : 0),
1529                            AMDGPU_VA_OP_MAP);
1530    if (r)
1531       goto error;
1532 
1533    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1534       initial |= RADEON_DOMAIN_VRAM;
1535    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1536       initial |= RADEON_DOMAIN_GTT;
1537    if (info.alloc_flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
1538       flags |= RADEON_FLAG_NO_CPU_ACCESS;
1539    if (info.alloc_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1540       flags |= RADEON_FLAG_GTT_WC;
1541    if (info.alloc_flags & AMDGPU_GEM_CREATE_ENCRYPTED) {
1542       /* Imports are always possible even if the importer isn't using TMZ.
1543        * For instance, libweston needs to import the buffer to determine
1544        * whether it can be used for scanout.
1545        */
1546       flags |= RADEON_FLAG_ENCRYPTED;
1547       *((bool*)&rws->uses_secure_bos) = true;
1548    }
1549 
1550    /* Initialize the structure. */
1551    simple_mtx_init(&bo->lock, mtx_plain);
1552    pipe_reference_init(&bo->base.reference, 1);
1553    bo->base.alignment_log2 = util_logbase2(info.phys_alignment);
1554    bo->bo = result.buf_handle;
1555    bo->base.size = result.alloc_size;
1556    bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1557    bo->va = va;
1558    bo->u.real.va_handle = va_handle;
1559    bo->base.placement = initial;
1560    bo->base.usage = flags;
1561    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1562    bo->u.real.is_shared = true;
1563 
1564    if (bo->base.placement & RADEON_DOMAIN_VRAM)
1565       ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
1566    else if (bo->base.placement & RADEON_DOMAIN_GTT)
1567       ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1568 
1569    amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
1570 
1571    amdgpu_add_buffer_to_global_list(ws, bo);
1572 
1573    _mesa_hash_table_insert(ws->bo_export_table, bo->bo, bo);
1574    simple_mtx_unlock(&ws->bo_export_table_lock);
1575 
1576    return &bo->base;
1577 
1578 error:
1579    simple_mtx_unlock(&ws->bo_export_table_lock);
1580    if (bo)
1581       FREE(bo);
1582    if (va_handle)
1583       amdgpu_va_range_free(va_handle);
1584    amdgpu_bo_free(result.buf_handle);
1585    return NULL;
1586 }
1587 
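/* Illustrative sketch (hypothetical caller, not part of this file): importing
 * a dma-buf fd received from another process. amdgpu_bo_from_handle() above
 * maps the winsys_handle type to the corresponding libdrm_amdgpu import type.
 *
 *    struct winsys_handle wh = {0};
 *    wh.type = WINSYS_HANDLE_TYPE_FD;
 *    wh.handle = dmabuf_fd;           // fd received over a socket, for example
 *    struct pb_buffer *buf = rws->buffer_from_handle(rws, &wh, 0, false);
 */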
1588 static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
1589                                  struct pb_buffer *buffer,
1590                                  struct winsys_handle *whandle)
1591 {
1592    struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
1593    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1594    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
1595    enum amdgpu_bo_handle_type type;
1596    struct hash_entry *entry;
1597    int r;
1598 
1599    /* Don't allow exports of slab entries and sparse buffers. */
1600    if (!bo->bo)
1601       return false;
1602 
1603    bo->u.real.use_reusable_pool = false;
1604 
1605    switch (whandle->type) {
1606    case WINSYS_HANDLE_TYPE_SHARED:
1607       type = amdgpu_bo_handle_type_gem_flink_name;
1608       break;
1609    case WINSYS_HANDLE_TYPE_KMS:
1610       if (sws->fd == ws->fd) {
1611          whandle->handle = bo->u.real.kms_handle;
1612 
1613          if (bo->u.real.is_shared)
1614             return true;
1615 
1616          goto hash_table_set;
1617       }
1618 
1619       simple_mtx_lock(&ws->sws_list_lock);
1620       entry = _mesa_hash_table_search(sws->kms_handles, bo);
1621       simple_mtx_unlock(&ws->sws_list_lock);
1622       if (entry) {
1623          whandle->handle = (uintptr_t)entry->data;
1624          return true;
1625       }
1626       FALLTHROUGH;
1627    case WINSYS_HANDLE_TYPE_FD:
1628       type = amdgpu_bo_handle_type_dma_buf_fd;
1629       break;
1630    default:
1631       return false;
1632    }
1633 
1634    r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
1635    if (r)
1636       return false;
1637 
1638    if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
1639       int dma_fd = whandle->handle;
1640 
1641       r = drmPrimeFDToHandle(sws->fd, dma_fd, &whandle->handle);
1642       close(dma_fd);
1643 
1644       if (r)
1645          return false;
1646 
1647       simple_mtx_lock(&ws->sws_list_lock);
1648       _mesa_hash_table_insert_pre_hashed(sws->kms_handles,
1649                                          bo->u.real.kms_handle, bo,
1650                                          (void*)(uintptr_t)whandle->handle);
1651       simple_mtx_unlock(&ws->sws_list_lock);
1652    }
1653 
1654  hash_table_set:
1655    simple_mtx_lock(&ws->bo_export_table_lock);
1656    _mesa_hash_table_insert(ws->bo_export_table, bo->bo, bo);
1657    simple_mtx_unlock(&ws->bo_export_table_lock);
1658 
1659    bo->u.real.is_shared = true;
1660    return true;
1661 }
1662 
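/* Illustrative sketch (hypothetical caller): exporting the same buffer as a
 * dma-buf fd so another process or device can import it. On success the
 * buffer is marked shared and is no longer returned to the reusable pool.
 *
 *    struct winsys_handle wh = {0};
 *    wh.type = WINSYS_HANDLE_TYPE_FD;
 *    if (rws->buffer_get_handle(rws, buf, &wh))
 *       send_fd_to_client(wh.handle);   // hypothetical transport
 */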
1663 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1664 					    void *pointer, uint64_t size,
1665 					    enum radeon_bo_flag flags)
1666 {
1667     struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1668     amdgpu_bo_handle buf_handle;
1669     struct amdgpu_winsys_bo *bo;
1670     uint64_t va;
1671     amdgpu_va_handle va_handle;
1672     /* Avoid failure when the size is not page aligned */
1673     uint64_t aligned_size = align64(size, ws->info.gart_page_size);
1674 
1675     bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1676     if (!bo)
1677         return NULL;
1678 
1679     if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
1680                                        aligned_size, &buf_handle))
1681         goto error;
1682 
1683     if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1684                               aligned_size,
1685                               amdgpu_get_optimal_alignment(ws, aligned_size,
1686                                                            ws->info.gart_page_size),
1687                               0, &va, &va_handle, AMDGPU_VA_RANGE_HIGH))
1688         goto error_va_alloc;
1689 
1690     if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
1691         goto error_va_map;
1692 
1693     /* Initialize it. */
1694     bo->u.real.is_user_ptr = true;
1695     pipe_reference_init(&bo->base.reference, 1);
1696     simple_mtx_init(&bo->lock, mtx_plain);
1697     bo->bo = buf_handle;
1698     bo->base.alignment_log2 = 0;
1699     bo->base.size = size;
1700     bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1701     bo->u.real.cpu_ptr = pointer;
1702     bo->va = va;
1703     bo->u.real.va_handle = va_handle;
1704     bo->base.placement = RADEON_DOMAIN_GTT;
1705     bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1706 
1707     ws->allocated_gtt += aligned_size;
1708 
1709     amdgpu_add_buffer_to_global_list(ws, bo);
1710 
1711     amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
1712 
1713     return (struct pb_buffer*)bo;
1714 
1715 error_va_map:
1716     amdgpu_va_range_free(va_handle);
1717 
1718 error_va_alloc:
1719     amdgpu_bo_free(buf_handle);
1720 
1721 error:
1722     FREE(bo);
1723     return NULL;
1724 }
1725 
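/* Illustrative sketch (hypothetical caller): wrapping application memory as a
 * GTT buffer. The pointer typically must be page-aligned and must stay valid
 * for the lifetime of the returned buffer; the winsys only rounds the size
 * up to a full page.
 *
 *    void *staging = aligned_alloc(4096, 1024 * 1024);   // assumed page size
 *    struct pb_buffer *buf =
 *       rws->buffer_from_ptr(rws, staging, 1024 * 1024, 0);
 */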
1726 static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
1727 {
1728    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1729 
1730    return bo->bo ? bo->u.real.is_user_ptr : false;
1731 }
1732 
1733 static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
1734 {
1735    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1736 
1737    return !bo->bo && !(bo->base.usage & RADEON_FLAG_SPARSE);
1738 }
1739 
1740 static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
1741 {
1742    return ((struct amdgpu_winsys_bo*)buf)->va;
1743 }
1744 
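/* Taken together, the helpers above classify every amdgpu_winsys_bo:
 *  - bo->bo != NULL                          real kernel BO (possibly a
 *                                            userptr, see is_user_ptr above)
 *  - bo->bo == NULL && !RADEON_FLAG_SPARSE   slab sub-allocation
 *  - bo->bo == NULL &&  RADEON_FLAG_SPARSE   sparse buffer backed on demand
 */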
1745 void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
1746 {
1747    ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1748    ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1749    ws->base.buffer_map = amdgpu_bo_map;
1750    ws->base.buffer_unmap = amdgpu_bo_unmap;
1751    ws->base.buffer_wait = amdgpu_bo_wait;
1752    ws->base.buffer_create = amdgpu_buffer_create;
1753    ws->base.buffer_from_handle = amdgpu_bo_from_handle;
1754    ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1755    ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1756    ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
1757    ws->base.buffer_get_handle = amdgpu_bo_get_handle;
1758    ws->base.buffer_commit = amdgpu_bo_sparse_commit;
1759    ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1760    ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1761    ws->base.buffer_get_flags = amdgpu_bo_get_flags;
1762 }
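/* Illustrative only (the call site is assumed, not shown in this file): the
 * screen-winsys constructor is expected to call amdgpu_bo_init_functions()
 * once, after which frontends reach all of the buffer paths above purely
 * through the radeon_winsys vtable:
 *
 *    amdgpu_bo_init_functions(sws);
 *    struct pb_buffer *buf =
 *       sws->base.buffer_create(&sws->base, 4 * 1024 * 1024, 4096,
 *                               RADEON_DOMAIN_VRAM,
 *                               RADEON_FLAG_NO_CPU_ACCESS);
 */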
1763