/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>

/*
 * These definitions are copied from radeon_drm.h. Once an updated libdrm
 * is released, we should bump the configure.ac requirement for it and
 * remove the following fallbacks.
 */
#define RADEON_BO_FLAGS_MACRO_TILE  1
#define RADEON_BO_FLAGS_MICRO_TILE  2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;  /* one of RADEON_GEM_* */
};

#endif

#ifndef RADEON_VA_MAP

#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif

extern const struct pb_vtbl radeon_bo_vtbl;

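/* Cast a generic pb_buffer to radeon_bo. This is only valid for buffers
 * created by this winsys, which is what the vtbl assertion checks. */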
static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

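/* A free range ("hole") of GPU virtual address space. The holes are kept in
 * mgr->va_holes ordered from highest to lowest offset, so the list head is
 * the hole closest to the top of the allocated range. */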
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* Is virtual address space supported? */
    bool va;
    uint64_t va_offset;
    struct list_head va_holes;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

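/* Block until the GPU is done with the buffer: first busy-wait for any
 * in-flight ioctls that still reference it, then ask the kernel to wait
 * for idle (retrying as long as it returns -EBUSY). */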
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}

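/* Non-blocking busy check: the buffer counts as busy while an ioctl
 * referencing it is still in flight, or while the kernel reports the
 * GEM object as busy. */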
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

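/* First-fit allocator for GPU virtual address space.
 *
 * Walk the hole list for a free range that can hold "size" bytes at the
 * requested alignment. "waste" is the padding needed to round a hole's
 * offset up to the alignment; e.g. a hole starting at 0x1300 with a 0x1000
 * alignment wastes 0xd00 bytes, which are split off as a smaller hole so
 * they can be reused later. If no hole fits, allocate from the top of the
 * address space (mgr->va_offset). */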
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = 0;
        if (alignment) {
            waste = offset % alignment;
            waste = waste ? alignment - waste : 0;
        }
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = 0;
    if (alignment) {
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
    }
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}

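/* Reserve an exact VA range, used when the kernel reports that a shared
 * buffer already has an address (RADEON_VA_RESULT_VA_EXIST). Holes that
 * overlap the range are pruned or trimmed, and any gap between the old
 * top of the address space and "va" becomes a new hole. */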
static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if (va >= mgr->va_offset) {
        if (va > mgr->va_offset) {
            struct radeon_bo_va_hole *hole;
            hole = CALLOC_STRUCT(radeon_bo_va_hole);
            if (hole) {
                hole->size = va - mgr->va_offset;
                hole->offset = mgr->va_offset;
                list_add(&hole->list, &mgr->va_holes);
            }
        }
        mgr->va_offset = va + size;
    } else {
        struct radeon_bo_va_hole *hole, *n;
        uint64_t hole_end, va_end;

        /* Prune/free all holes that fall into the range */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
            hole_end = hole->offset + hole->size;
            va_end = va + size;
            if (hole->offset >= va_end || hole_end <= va)
                continue;
            if (hole->offset >= va && hole_end <= va_end) {
                list_del(&hole->list);
                FREE(hole);
                continue;
            }
            if (hole->offset >= va)
                hole->offset = va_end;
            else
                hole_end = va;
            hole->size = hole_end - hole->offset;
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

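/* Return a VA range to the allocator. If the range ends at the top of the
 * address space, just lower va_offset (also swallowing an adjacent topmost
 * hole); otherwise insert a hole at the right place in the sorted list,
 * merging it with any neighbour it touches. */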
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

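/* Release a buffer: drop its name from the handle table, unmap it from the
 * CPU, close the GEM handle and free its VA range. */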
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
    }

    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

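/* Map a buffer into the CPU's address space.
 *
 * Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, this synchronizes with the
 * GPU first: with PIPE_TRANSFER_DONTBLOCK it returns NULL (after starting
 * an asynchronous flush) if the buffer is busy, otherwise it flushes the CS
 * if needed and blocks until the buffer is idle. Read-only mappings only
 * have to wait for GPU writes. The mapping is cached in bo->ptr, so a
 * buffer is mmapped at most once; map_mutex guards against two threads
 * racing to create the first mapping. */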
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

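/* Allocate a new buffer with DRM_RADEON_GEM_CREATE and, if virtual memory
 * is enabled, pick a virtual address for it and map it into the GPU
 * address space. If the kernel says the address is already taken, adopt
 * the address it returns instead. */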
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = align(size, 4096);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}

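/* Convert the Evergreen tile-split field to bytes: the hardware encodes a
 * power of two, 64 << index, so indices 0-6 map to 64-4096 bytes; unknown
 * values fall back to 1024. eg_tile_split_rev below is the inverse. */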
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}

static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}

static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};
    int r;

    memset(&open_arg, 0, sizeof(open_arg));

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = ((bo->base.size + 4095) & ~4095);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

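/* Export a buffer for sharing. For SHARED handles the BO is flinked to get
 * a global name (cached in bo->flink so we only flink once); for KMS the
 * local GEM handle is returned directly. */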
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}