1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */ 2 3 /* 4 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org> 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 * SOFTWARE. 
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

/* Protects the per-device handle_table/name_table hashes and the bo
 * caches, and serializes bo teardown against table lookups: */
drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private void bo_del(struct fd_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	if (!drmHashLookup(tbl, key, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(bo);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		/* backend could not wrap the handle; close it here so the
		 * GEM object is not leaked: */
		drmCloseBufferHandle(dev->fd, handle);
		return NULL;
	}
	bo->dev = fd_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}

/* Allocate a bo of at least 'size' bytes with the given allocation
 * flags, preferring a recycled bo from 'cache'.  Returns NULL on
 * failure. */
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
		struct fd_bo_cache *cache)
{
	struct fd_bo *bo = NULL;
	uint32_t handle;
	int ret;

	/* try the cache first; note that 'size' is passed by pointer and
	 * may be rounded up to the cache's bucket size: */
	bo = fd_bo_cache_alloc(cache, &size, flags);
	if (bo)
		return bo;

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	pthread_mutex_unlock(&table_lock);

	/* NOTE(review): bo may be NULL here if bo_from_handle() failed;
	 * presumably VG_BO_ALLOC tolerates a NULL argument — confirm. */
	VG_BO_ALLOC(bo);

	return bo;
}

/* Allocate a new buffer from the normal bo cache.  Returns a bo with
 * one reference, or NULL on failure. */
drm_public
struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
	if (bo)
		bo->bo_reuse = BO_CACHE;
	return bo;
}

/* internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  The purpose is, because cmdstream
 * bo's get vmap'd on the kernel side, and that is expensive, we want
 * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
 */
drm_private struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
	if (bo)
		bo->bo_reuse = RING_CACHE;
	return bo;
}

/* Import a bo by GEM handle.  If the handle is already known, the
 * existing bo is returned with an extra reference; otherwise a new bo
 * wrapping the handle is created. */
drm_public struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	/* check if the handle is already open, to avoid creating two bo's
	 * referring to the same GEM object: */
	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

	/* NOTE(review): bo may be NULL here on failure; presumably
	 * VG_BO_ALLOC tolerates that — confirm. */
	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* Import a dma-buf fd.  Returns an existing bo (with an extra ref) if
 * the prime fd resolves to an already-known handle, otherwise wraps it
 * in a new bo.  Returns NULL on failure. */
drm_public struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	/* NOTE(review): 'size' is int but lseek() returns off_t — large
	 * buffers would truncate, and an lseek() error (-1) goes
	 * unchecked; confirm whether this matters for supported sizes. */
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	/* the table_lock is taken before the prime import so that the
	 * handle-to-bo mapping stays consistent with the lookup below: */
	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	/* NOTE(review): SEEK_CUR with offset 0 does not undo the SEEK_END
	 * above — it is a no-op; looks like SEEK_SET was intended to
	 * restore the file position.  Confirm against callers. */
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	/* NOTE(review): bo may be NULL here on failure; presumably
	 * VG_BO_ALLOC tolerates that — confirm. */
	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* Import a bo by flink name.  Returns an existing bo (with an extra
 * ref) when the name or resulting handle is already known, otherwise
 * opens the GEM object and wraps it.  Returns NULL on failure. */
drm_public struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	/* GEM_OPEN may return a handle we already track (the object was
	 * imported another way), so check the handle table too: */
	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo) {
		/* record the flink name so later lookups hit the fast path: */
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* Return the GPU iova of the bo (backend-specific). */
drm_public uint64_t fd_bo_get_iova(struct fd_bo *bo)
{
	return bo->funcs->iova(bo);
}

/* Release an iova obtained with fd_bo_get_iova(). */
drm_public void fd_bo_put_iova(struct fd_bo *bo)
{
	/* currently a no-op */
}

/* Take an additional reference on the bo and return it. */
drm_public struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

/* Drop a reference.  When the last reference goes away the bo is
 * either returned to the cache it was allocated from (if the cache
 * accepts it) or destroyed outright. */
drm_public void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	/* fd_bo_cache_free() returning 0 means the cache took ownership: */
	if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;
	if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
		goto out;

	bo_del(bo);
	/* drop the device ref taken in bo_from_handle(); _locked variant
	 * because table_lock is already held: */
	fd_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}

/* Actually destroy a bo: unmap, remove from the hash tables, close the
 * GEM handle, and invoke the backend destructor.
 * Called under table_lock */
drm_private void bo_del(struct fd_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->map)
		drm_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		drmHashDelete(bo->dev->handle_table, bo->handle);
		if (bo->name)
			drmHashDelete(bo->dev->name_table, bo->name);
		drmCloseBufferHandle(bo->dev->fd, bo->handle);
	}

	bo->funcs->destroy(bo);
}

/* Get (creating via flink if necessary) the global name of the bo.
 * Returns 0 on success with *name filled in, or the ioctl error. */
drm_public int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		/* once exported the bo may be shared with another process,
		 * so it can no longer be recycled through the bo cache: */
		bo->bo_reuse = NO_CACHE;
	}

	*name = bo->name;

	return 0;
}

/* Return the GEM handle of the bo. */
drm_public uint32_t fd_bo_handle(struct fd_bo *bo)
{
	return bo->handle;
}

/* Export the bo as a dma-buf fd.  Returns the new fd (ownership
 * transfers to the caller) or a negative error code. */
drm_public int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	/* exported bo's may be shared externally, so never recycle them
	 * through the bo cache: */
	bo->bo_reuse = NO_CACHE;

	return prime_fd;
}

/* Return the size of the bo in bytes. */
drm_public uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

/* Return a CPU mapping of the bo, mapping it lazily on first use.
 * The mapping is cached and lives until the bo is destroyed.
 * Returns NULL on failure. */
drm_public void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		/* backend supplies the fake mmap offset for this bo: */
		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl..
 */
drm_public int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

/* Signal that CPU access started with fd_bo_cpu_prep() is finished. */
drm_public void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}

#if !HAVE_FREEDRENO_KGSL
/* stub for the non-kgsl build; the real implementation lives in the
 * kgsl backend: */
drm_public struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
{
	return NULL;
}
#endif