/*
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include "radeon_cs.h"
#include "radeon_cs_int.h"
#include "radeon_bo_int.h"
#include "radeon_cs_gem.h"
#include "radeon_bo_gem.h"
#include "drm.h"
#include "libdrm_macros.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "radeon_drm.h"

/* Add LIBDRM_RADEON_BOF_FILES to libdrm_radeon_la_SOURCES when building with BOF_DUMP */
#define CS_BOF_DUMP 0
#if CS_BOF_DUMP
#include "bof.h"
#endif

struct radeon_cs_manager_gem {
    struct radeon_cs_manager    base;
    uint32_t                    device_id;
    unsigned                    nbof;
};

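/*
 * A single relocation record as stored in the RELOCS chunk. The layout is
 * expected to match struct drm_radeon_cs_reloc from radeon_drm.h (handle,
 * read domain, write domain, flags); the pragma pack below makes sure no
 * padding is inserted between the fields.
 */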
#pragma pack(1)
struct cs_reloc_gem {
    uint32_t    handle;
    uint32_t    read_domain;
    uint32_t    write_domain;
    uint32_t    flags;
};

#pragma pack()
#define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))
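/* RELOC_SIZE is the size of one relocation record in dwords (16 bytes, i.e.
 * 4 dwords per entry); it is the unit used when indexing into csg->relocs
 * and when advancing chunks[1].length_dw. */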

struct cs_gem {
    struct radeon_cs_int        base;
    struct drm_radeon_cs        cs;
    struct drm_radeon_cs_chunk  chunks[2];
    unsigned                    nrelocs;
    uint32_t                    *relocs;
    struct radeon_bo_int        **relocs_bo;
};

static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t cs_id_source = 0;

/**
 * result is undefined if called with ~0
 */
static uint32_t get_first_zero(const uint32_t n)
{
    /* __builtin_ctz returns number of trailing zeros. */
    return 1 << __builtin_ctz(~n);
}
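
/* For example, with n == 0x00000007 the lowest clear bit is bit 3, so
 * get_first_zero() returns 0x00000008. */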

/**
 * Returns a free id for a cs.
 * If there is no free id, zero is returned.
 **/
static uint32_t generate_id(void)
{
    uint32_t r = 0;
    pthread_mutex_lock( &id_mutex );
    /* check for free ids */
    if (cs_id_source != ~r) {
        /* find first zero bit */
        r = get_first_zero(cs_id_source);

        /* set id as reserved */
        cs_id_source |= r;
    }
    pthread_mutex_unlock( &id_mutex );
    return r;
}

/**
 * Free the id for later reuse
 **/
static void free_id(uint32_t id)
{
    pthread_mutex_lock( &id_mutex );

    cs_id_source &= ~id;

    pthread_mutex_unlock( &id_mutex );
}

static struct radeon_cs_int *cs_gem_create(struct radeon_cs_manager *csm,
                                           uint32_t ndw)
{
    struct cs_gem *csg;

    /* max cmd buffer size is 64KB (16K dwords) */
    if (ndw > (64 * 1024 / 4)) {
        return NULL;
    }
    csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
    if (csg == NULL) {
        return NULL;
    }
    csg->base.csm = csm;
    csg->base.ndw = 64 * 1024 / 4;
    csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
    if (csg->base.packets == NULL) {
        free(csg);
        return NULL;
    }
    csg->base.relocs_total_size = 0;
    csg->base.crelocs = 0;
    csg->base.id = generate_id();
    csg->nrelocs = 4096 / (4 * 4);
    csg->relocs_bo = (struct radeon_bo_int**)calloc(1,
                                                csg->nrelocs*sizeof(void*));
    if (csg->relocs_bo == NULL) {
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
    if (csg->relocs == NULL) {
        free(csg->relocs_bo);
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
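    /*
     * The command stream is handed to the kernel as two chunks: chunk 0
     * holds the IB (the PM4 packets written via radeon_cs_write_dword())
     * and chunk 1 holds the relocation records. Both length_dw fields stay
     * at 0 here and are filled in as packets and relocs are added; the
     * chunks are finally wired into a drm_radeon_cs and submitted with the
     * DRM_RADEON_CS ioctl in cs_gem_emit().
     */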
    csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csg->chunks[0].length_dw = 0;
    csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
    csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csg->chunks[1].length_dw = 0;
    csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    return (struct radeon_cs_int*)csg;
}

static int cs_gem_write_reloc(struct radeon_cs_int *cs,
                              struct radeon_bo *bo,
                              uint32_t read_domain,
                              uint32_t write_domain,
                              uint32_t flags)
{
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct cs_reloc_gem *reloc;
    uint32_t idx;
    unsigned i;

    assert(boi->space_accounted);

    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* within one CS a bo can be in the read domain or the write
         * domain, but not in both at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* use the id bit field as a hash to rule out bos that are
       definitely not yet in this cs. */
    if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
        /* check whether the bo is already referenced.
         * Scanning from the end towards the beginning saves cycles with
         * mesa, which often relocates the same shared dma bo again. */
        for (i = cs->crelocs; i != 0;) {
            --i;
            idx = i * RELOC_SIZE;
            reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
            if (reloc->handle == bo->handle) {
                /* The domains must still be exclusively read or write. The
                 * argument check above already ensured that exactly one of
                 * read_domain and write_domain is set, so here we only have
                 * to reconcile the new request with the domains recorded in
                 * the existing relocation.
                 */
                /* the DDX expects to read and write from the same pixmap */
                if (write_domain && (reloc->read_domain & write_domain)) {
                    reloc->read_domain = 0;
                    reloc->write_domain = write_domain;
                } else if (read_domain & reloc->write_domain) {
                    reloc->read_domain = 0;
                } else {
                    if (write_domain != reloc->write_domain)
                        return -EINVAL;
                    if (read_domain != reloc->read_domain)
                        return -EINVAL;
                }

                reloc->read_domain |= read_domain;
                reloc->write_domain |= write_domain;
                /* update flags */
                reloc->flags |= (flags & reloc->flags);
                /* write relocation packet */
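                /*
                 * 0xc0001000 is a PM4 type-3 NOP packet with a single data
                 * dword; that dword carries the dword offset of the reloc
                 * record within the RELOCS chunk, which the kernel CS
                 * checker uses to look up the bo and patch addresses at
                 * submission time.
                 */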
                radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
                radeon_cs_write_dword((struct radeon_cs *)cs, idx);
                return 0;
            }
        }
    }
    /* new relocation */
    if (csg->base.crelocs >= csg->nrelocs) {
        /* allocate more memory (TODO: should use a slab allocator maybe) */
        uint32_t *tmp, size;
        size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
        tmp = (uint32_t*)realloc(csg->relocs_bo, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        csg->relocs_bo = (struct radeon_bo_int **)tmp;
        size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
        tmp = (uint32_t*)realloc(csg->relocs, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        cs->relocs = csg->relocs = tmp;
        csg->nrelocs += 1;
        csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    }
    csg->relocs_bo[csg->base.crelocs] = boi;
    idx = (csg->base.crelocs++) * RELOC_SIZE;
    reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
    reloc->handle = bo->handle;
    reloc->read_domain = read_domain;
    reloc->write_domain = write_domain;
    reloc->flags = flags;
    csg->chunks[1].length_dw += RELOC_SIZE;
    radeon_bo_ref(bo);
    /* the bo might be referenced from another context so we have to use atomic operations */
    atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
    cs->relocs_total_size += boi->size;
    radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
    radeon_cs_write_dword((struct radeon_cs *)cs, idx);
    return 0;
}

static int cs_gem_begin(struct radeon_cs_int *cs,
                        uint32_t ndw,
                        const char *file,
                        const char *func,
                        int line)
{
    if (cs->section_ndw) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section_ndw = ndw;
    cs->section_cdw = 0;
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;

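    /*
     * Grow the packet buffer if the section would overflow it. The new size
     * is rounded up to a multiple of 1024 dwords; for example with
     * cs->cdw == 1000 and ndw == 200, (1000 + 200 + 0x3FF) & ~0x3FF yields
     * 2048 dwords (8KB).
     */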
    if (cs->cdw + ndw > cs->ndw) {
        uint32_t tmp, *ptr;

        /* round up the required size to a multiple of 1024 */
        tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
        ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        cs->packets = ptr;
        cs->ndw = tmp;
    }
    return 0;
}

static int cs_gem_end(struct radeon_cs_int *cs,
                      const char *file,
                      const char *func,
                      int line)
{
    if (!cs->section_ndw) {
        fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    if (cs->section_ndw != cs->section_cdw) {
        fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
                cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);

        /* We must reset the section even when there is an error. */
        cs->section_ndw = 0;
        return -EPIPE;
    }
    cs->section_ndw = 0;
    return 0;
}

#if CS_BOF_DUMP
static void cs_gem_dump_bof(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct radeon_cs_manager_gem *csm;
    bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
    char tmp[256];
    unsigned i;

    csm = (struct radeon_cs_manager_gem *)cs->csm;
    root = device_id = bcs = blob = array = bo = size = handle = NULL;
    root = bof_object();
    if (root == NULL)
        goto out_err;
    device_id = bof_int32(csm->device_id);
    if (device_id == NULL)
        goto out_err;
    if (bof_object_set(root, "device_id", device_id))
        goto out_err;
    bof_decref(device_id);
    device_id = NULL;
    /* dump relocs */
    blob = bof_blob(csg->nrelocs * 16, csg->relocs);
    if (blob == NULL)
        goto out_err;
    if (bof_object_set(root, "reloc", blob))
        goto out_err;
    bof_decref(blob);
    blob = NULL;
    /* dump cs */
    blob = bof_blob(cs->cdw * 4, cs->packets);
    if (blob == NULL)
        goto out_err;
    if (bof_object_set(root, "pm4", blob))
        goto out_err;
    bof_decref(blob);
    blob = NULL;
    /* dump bo */
    array = bof_array();
    if (array == NULL)
        goto out_err;
    for (i = 0; i < csg->base.crelocs; i++) {
        bo = bof_object();
        if (bo == NULL)
            goto out_err;
        size = bof_int32(csg->relocs_bo[i]->size);
        if (size == NULL)
            goto out_err;
        if (bof_object_set(bo, "size", size))
            goto out_err;
        bof_decref(size);
        size = NULL;
        handle = bof_int32(csg->relocs_bo[i]->handle);
        if (handle == NULL)
            goto out_err;
        if (bof_object_set(bo, "handle", handle))
            goto out_err;
        bof_decref(handle);
        handle = NULL;
        radeon_bo_map((struct radeon_bo*)csg->relocs_bo[i], 0);
        blob = bof_blob(csg->relocs_bo[i]->size, csg->relocs_bo[i]->ptr);
        radeon_bo_unmap((struct radeon_bo*)csg->relocs_bo[i]);
        if (blob == NULL)
            goto out_err;
        if (bof_object_set(bo, "data", blob))
            goto out_err;
        bof_decref(blob);
        blob = NULL;
        if (bof_array_append(array, bo))
            goto out_err;
        bof_decref(bo);
        bo = NULL;
    }
    if (bof_object_set(root, "bo", array))
        goto out_err;
    sprintf(tmp, "d-0x%04X-%08d.bof", csm->device_id, csm->nbof++);
    bof_dump_file(root, tmp);
out_err:
    bof_decref(blob);
    bof_decref(array);
    bof_decref(bo);
    bof_decref(size);
    bof_decref(handle);
    bof_decref(device_id);
    bof_decref(root);
}
#endif

static int cs_gem_emit(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    uint64_t chunk_array[2];
    unsigned i;
    int r;

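    /*
     * Pad the IB with PM4 type-2 NOP dwords (0x80000000) so that its length
     * is a multiple of 8 dwords before handing it to the kernel.
     */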
    while (cs->cdw & 7)
        radeon_cs_write_dword((struct radeon_cs *)cs, 0x80000000);

#if CS_BOF_DUMP
    cs_gem_dump_bof(cs);
#endif
    csg->chunks[0].length_dw = cs->cdw;

    chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
    chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];

    csg->cs.num_chunks = 2;
    csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;

    r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
                            &csg->cs, sizeof(struct drm_radeon_cs));
    for (i = 0; i < csg->base.crelocs; i++) {
        csg->relocs_bo[i]->space_accounted = 0;
        /* the bo might be referenced from another context so we have to use atomic operations */
        atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
        radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
        csg->relocs_bo[i] = NULL;
    }

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;
    return r;
}

static int cs_gem_destroy(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;

    free_id(cs->id);
    free(csg->relocs_bo);
    free(cs->relocs);
    free(cs->packets);
    free(cs);
    return 0;
}

static int cs_gem_erase(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    unsigned i;

    if (csg->relocs_bo) {
        for (i = 0; i < csg->base.crelocs; i++) {
            if (csg->relocs_bo[i]) {
                /* the bo might be referenced from another context so we have to use atomic operations */
                atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
                radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
                csg->relocs_bo[i] = NULL;
            }
        }
    }
    cs->relocs_total_size = 0;
    cs->cdw = 0;
    cs->section_ndw = 0;
    cs->crelocs = 0;
    csg->chunks[0].length_dw = 0;
    csg->chunks[1].length_dw = 0;
    return 0;
}

static int cs_gem_need_flush(struct radeon_cs_int *cs)
{
    return 0; //(cs->relocs_total_size > (32*1024*1024));
}

static void cs_gem_print(struct radeon_cs_int *cs, FILE *file)
{
    struct radeon_cs_manager_gem *csm;
    unsigned int i;

    csm = (struct radeon_cs_manager_gem *)cs->csm;
    fprintf(file, "VENDORID:DEVICEID 0x%04X:0x%04X\n", 0x1002, csm->device_id);
    for (i = 0; i < cs->cdw; i++) {
        fprintf(file, "0x%08X\n", cs->packets[i]);
    }
}

static const struct radeon_cs_funcs radeon_cs_gem_funcs = {
    .cs_create = cs_gem_create,
    .cs_write_reloc = cs_gem_write_reloc,
    .cs_begin = cs_gem_begin,
    .cs_end = cs_gem_end,
    .cs_emit = cs_gem_emit,
    .cs_destroy = cs_gem_destroy,
    .cs_erase = cs_gem_erase,
    .cs_need_flush = cs_gem_need_flush,
    .cs_print = cs_gem_print,
};

static int radeon_get_device_id(int fd, uint32_t *device_id)
{
    struct drm_radeon_info info = {};
    int r;

    *device_id = 0;
    info.request = RADEON_INFO_DEVICE_ID;
    info.value = (uintptr_t)device_id;
    r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
                            sizeof(struct drm_radeon_info));
    return r;
}

drm_public struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
{
    struct radeon_cs_manager_gem *csm;

    csm = calloc(1, sizeof(struct radeon_cs_manager_gem));
    if (csm == NULL) {
        return NULL;
    }
    csm->base.funcs = &radeon_cs_gem_funcs;
    csm->base.fd = fd;
    radeon_get_device_id(fd, &csm->device_id);
    return &csm->base;
}

drm_public void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
{
    free(csm);
}
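
/*
 * Typical use of this backend through the public radeon_cs API — an
 * illustrative sketch only, assuming a valid DRM fd and an already
 * allocated bo, and omitting error handling; some_packet_header and
 * some_packet_payload stand in for real PM4 packet dwords:
 *
 *   struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(fd);
 *   struct radeon_cs *cs = radeon_cs_create(csm, 16 * 1024);
 *
 *   radeon_cs_space_add_persistent_bo(cs, bo, RADEON_GEM_DOMAIN_VRAM, 0);
 *   radeon_cs_space_check(cs);          // satisfies the space_accounted assert
 *
 *   // ndw == 4: two packet dwords plus two dwords for the reloc NOP packet
 *   radeon_cs_begin(cs, 4, __FILE__, __func__, __LINE__);
 *   radeon_cs_write_dword(cs, some_packet_header);
 *   radeon_cs_write_dword(cs, some_packet_payload);
 *   radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_VRAM, 0, 0);
 *   radeon_cs_end(cs, __FILE__, __func__, __LINE__);
 *
 *   radeon_cs_emit(cs);
 *   radeon_cs_destroy(cs);
 *   radeon_cs_manager_gem_dtor(csm);
 */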