/*
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Dave Airlie
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include "libdrm_macros.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_bo.h"
#include "radeon_bo_int.h"
#include "radeon_bo_gem.h"
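
/*
 * GEM-backed buffer object. "base" must stay first so the generic
 * radeon_bo/radeon_bo_int code can cast back and forth. "name" caches the
 * flink name (0 until the bo is named or was opened by name), "map_count"
 * tracks nested bo_map()/bo_unmap() calls, "reloc_in_cs" is exposed through
 * radeon_gem_get_reloc_in_cs(), and "priv_ptr" keeps the CPU mapping cached
 * across unmaps.
 */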
struct radeon_bo_gem {
    struct radeon_bo_int base;
    uint32_t             name;
    int                  map_count;
    atomic_t             reloc_in_cs;
    void                 *priv_ptr;
};

struct bo_manager_gem {
    struct radeon_bo_manager base;
};

static int bo_wait(struct radeon_bo_int *boi);
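
/*
 * Open or create a buffer object. When "handle" is non-zero it is treated
 * as a flink name and the existing object is opened with DRM_IOCTL_GEM_OPEN;
 * otherwise a new object is allocated through DRM_RADEON_GEM_CREATE.
 */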
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
                                 uint32_t flags)
{
    struct radeon_bo_gem *bo;
    int r;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }

    bo->base.bom = bom;
    bo->base.handle = 0;
    bo->base.size = size;
    bo->base.alignment = alignment;
    bo->base.domains = domains;
    bo->base.flags = flags;
    bo->base.ptr = NULL;
    atomic_set(&bo->reloc_in_cs, 0);
    bo->map_count = 0;
    if (handle) {
        struct drm_gem_open open_arg;

        memset(&open_arg, 0, sizeof(open_arg));
        open_arg.name = handle;
        r = drmIoctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
        if (r != 0) {
            free(bo);
            return NULL;
        }
        bo->base.handle = open_arg.handle;
        bo->base.size = open_arg.size;
        bo->name = handle;
    } else {
        struct drm_radeon_gem_create args;

        args.size = size;
        args.alignment = alignment;
        args.initial_domain = bo->base.domains;
        args.flags = flags;
        args.handle = 0;
        r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
                                &args, sizeof(args));
        bo->base.handle = args.handle;
        if (r) {
            fprintf(stderr, "Failed to allocate:\n");
            fprintf(stderr, "   size      : %d bytes\n", size);
            fprintf(stderr, "   alignment : %d bytes\n", alignment);
            fprintf(stderr, "   domains   : %d\n", bo->base.domains);
            free(bo);
            return NULL;
        }
    }
    radeon_bo_ref((struct radeon_bo*)bo);
    return (struct radeon_bo*)bo;
}
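
/* Nothing to do here: the reference count (boi->cref) is maintained by the
 * generic radeon_bo wrappers. */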
static void bo_ref(struct radeon_bo_int *boi)
{
}
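
/*
 * Called once the reference count has dropped to zero: release the cached
 * CPU mapping (if any), close the GEM handle and free the wrapper.
 */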
static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
    struct drm_gem_close args;

    if (boi->cref) {
        return (struct radeon_bo *)boi;
    }
    if (bo_gem->priv_ptr) {
        drm_munmap(bo_gem->priv_ptr, boi->size);
    }

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));

    /* close object */
    args.handle = boi->handle;
    drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
    memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
    free(bo_gem);
    return NULL;
}
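
/*
 * Map the buffer for CPU access. The first call asks the kernel for a fake
 * mmap offset with DRM_RADEON_GEM_MMAP and maps it through the DRM fd; the
 * mapping is then cached in priv_ptr. The outermost map call also waits for
 * the bo to go idle before returning.
 */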
static int bo_map(struct radeon_bo_int *boi, int write)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
    struct drm_radeon_gem_mmap args;
    int r;
    void *ptr;

    if (bo_gem->map_count++ != 0) {
        return 0;
    }
    if (bo_gem->priv_ptr) {
        goto wait;
    }

    boi->ptr = NULL;

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));
    args.handle = boi->handle;
    args.offset = 0;
    args.size = (uint64_t)boi->size;
    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args));
    if (r) {
        fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
                boi, boi->handle, r);
        return r;
    }
    ptr = drm_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                   boi->bom->fd, args.addr_ptr);
    if (ptr == MAP_FAILED)
        return -errno;
    bo_gem->priv_ptr = ptr;
wait:
    boi->ptr = bo_gem->priv_ptr;
    r = bo_wait(boi);
    if (r)
        return r;
    return 0;
}
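
/*
 * Drop one level of mapping. The mmap itself stays cached in priv_ptr and is
 * only torn down in bo_unref(); only the user-visible ptr is cleared here.
 */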
static int bo_unmap(struct radeon_bo_int *boi)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;

    if (--bo_gem->map_count > 0) {
        return 0;
    }
    //drm_munmap(bo->ptr, bo->size);
    boi->ptr = NULL;
    return 0;
}
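
/* Block until the kernel reports the bo idle, retrying while it returns -EBUSY. */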
static int bo_wait(struct radeon_bo_int *boi)
{
    struct drm_radeon_gem_wait_idle args;
    int ret;

    /* Zero out args to make valgrind happy */
    memset(&args, 0, sizeof(args));
    args.handle = boi->handle;
    do {
        ret = drmCommandWrite(boi->bom->fd, DRM_RADEON_GEM_WAIT_IDLE,
                              &args, sizeof(args));
    } while (ret == -EBUSY);
    return ret;
}
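
/*
 * Non-blocking busy check: returns the kernel's answer and reports the
 * domain the bo is currently active in through *domain.
 */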
static int bo_is_busy(struct radeon_bo_int *boi, uint32_t *domain)
{
    struct drm_radeon_gem_busy args;
    int ret;

    args.handle = boi->handle;
    args.domain = 0;

    ret = drmCommandWriteRead(boi->bom->fd, DRM_RADEON_GEM_BUSY,
                              &args, sizeof(args));

    *domain = args.domain;
    return ret;
}
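
/* Apply tiling flags and pitch to the bo via DRM_RADEON_GEM_SET_TILING. */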
static int bo_set_tiling(struct radeon_bo_int *boi, uint32_t tiling_flags,
                         uint32_t pitch)
{
    struct drm_radeon_gem_set_tiling args;
    int r;

    args.handle = boi->handle;
    args.tiling_flags = tiling_flags;
    args.pitch = pitch;

    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_SET_TILING,
                            &args,
                            sizeof(args));
    return r;
}
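
/*
 * Query tiling state. The GET ioctl uses the same argument layout as SET,
 * hence the reuse of struct drm_radeon_gem_set_tiling.
 */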
static int bo_get_tiling(struct radeon_bo_int *boi, uint32_t *tiling_flags,
                         uint32_t *pitch)
{
    struct drm_radeon_gem_set_tiling args = {};
    int r;

    args.handle = boi->handle;

    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_GET_TILING,
                            &args,
                            sizeof(args));

    if (r)
        return r;

    *tiling_flags = args.tiling_flags;
    *pitch = args.pitch;
    return r;
}
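
/* GEM implementation of the buffer-object vtable; entries left NULL are not
 * supported by this backend. */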
static const struct radeon_bo_funcs bo_gem_funcs = {
    .bo_open = bo_open,
    .bo_ref = bo_ref,
    .bo_unref = bo_unref,
    .bo_map = bo_map,
    .bo_unmap = bo_unmap,
    .bo_wait = bo_wait,
    .bo_is_static = NULL,
    .bo_set_tiling = bo_set_tiling,
    .bo_get_tiling = bo_get_tiling,
    .bo_is_busy = bo_is_busy,
    .bo_is_referenced_by_cs = NULL,
};
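
/* Create a GEM buffer manager bound to an already-open DRM file descriptor. */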
struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
{
    struct bo_manager_gem *bomg;

    bomg = (struct bo_manager_gem*)calloc(1, sizeof(struct bo_manager_gem));
    if (bomg == NULL) {
        return NULL;
    }
    bomg->base.funcs = &bo_gem_funcs;
    bomg->base.fd = fd;
    return (struct radeon_bo_manager*)bomg;
}
void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;

    if (bom == NULL) {
        return;
    }
    free(bomg);
}
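
/*
 * Minimal usage sketch (assumes the public wrappers declared in radeon_bo.h
 * and a DRM fd opened elsewhere, e.g. via drmOpen()):
 *
 *     struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(fd);
 *     struct radeon_bo *bo = radeon_bo_open(bom, 0, 64 * 1024, 4096,
 *                                           RADEON_GEM_DOMAIN_GTT, 0);
 *     if (bo && radeon_bo_map(bo, 1) == 0) {
 *         memset(bo->ptr, 0, 64 * 1024);
 *         radeon_bo_unmap(bo);
 *     }
 *     radeon_bo_unref(bo);
 *     radeon_bo_manager_gem_dtor(bom);
 */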
uint32_t
radeon_gem_name_bo(struct radeon_bo *bo)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    return bo_gem->name;
}
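
/* Expose the bo's reloc_in_cs atomic so the CS code can track pending relocs. */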
void *
radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    return &bo_gem->reloc_in_cs;
}
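
/*
 * Return (and cache) a global flink name for the bo, creating one with
 * DRM_IOCTL_GEM_FLINK on first use.
 */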
int
radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct drm_gem_flink flink;
    int r;

    if (bo_gem->name) {
        *name = bo_gem->name;
        return 0;
    }
    flink.handle = bo->handle;
    r = drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_FLINK, &flink);
    if (r) {
        return r;
    }
    bo_gem->name = flink.name;
    *name = flink.name;
    return 0;
}
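
/* Ask the kernel to place the bo in the given read/write domains
 * (DRM_RADEON_GEM_SET_DOMAIN). */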
int
radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
{
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct drm_radeon_gem_set_domain args;
    int r;

    args.handle = bo->handle;
    args.read_domains = read_domains;
    args.write_domain = write_domain;

    r = drmCommandWriteRead(boi->bom->fd,
                            DRM_RADEON_GEM_SET_DOMAIN,
                            &args,
                            sizeof(args));
    return r;
}
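
/* Export the bo as a PRIME (dma-buf) file descriptor; *handle receives the fd. */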
int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
{
    struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
    int ret;

    ret = drmPrimeHandleToFD(bo_gem->base.bom->fd, bo->handle, DRM_CLOEXEC, handle);
    return ret;
}
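
/*
 * Import a PRIME (dma-buf) file descriptor as a radeon bo; the caller
 * supplies the buffer size, and the resulting bo is placed in the GTT domain.
 */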
struct radeon_bo *
radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
{
    struct radeon_bo_gem *bo;
    int r;
    uint32_t handle;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }

    bo->base.bom = bom;
    bo->base.handle = 0;
    bo->base.size = size;
    bo->base.alignment = 0;
    bo->base.domains = RADEON_GEM_DOMAIN_GTT;
    bo->base.flags = 0;
    bo->base.ptr = NULL;
    atomic_set(&bo->reloc_in_cs, 0);
    bo->map_count = 0;

    r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
    if (r != 0) {
        free(bo);
        return NULL;
    }

    bo->base.handle = handle;
    bo->name = handle;

    radeon_bo_ref((struct radeon_bo *)bo);
    return (struct radeon_bo *)bo;
}