
Lines Matching refs:qdev

62 	struct qxl_device *qdev;  in qxl_fence_wait()  local
68 qdev = container_of(fence->lock, struct qxl_device, release_lock); in qxl_fence_wait()
78 qxl_io_notify_oom(qdev); in qxl_fence_wait()
81 if (!qxl_queue_garbage_collect(qdev, true)) in qxl_fence_wait()
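The qxl_fence_wait() lines above recover qdev from nothing but the fence's lock pointer. Below is a minimal sketch of that container_of() pattern; struct qxl_device is reduced here to the single field involved, and fence_to_qdev() is a made-up name for illustration, not a driver function.

#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct qxl_device {
	spinlock_t release_lock;	/* the lock every release fence is initialised with */
	/* ... the real structure has many more fields ... */
};

static struct qxl_device *fence_to_qdev(struct dma_fence *fence)
{
	/* fence->lock was set to &qdev->release_lock at dma_fence_init() time,
	 * so walking back from the lock member yields the enclosing device. */
	return container_of(fence->lock, struct qxl_device, release_lock);
}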
127 qxl_release_alloc(struct qxl_device *qdev, int type, in qxl_release_alloc() argument
146 spin_lock(&qdev->release_idr_lock); in qxl_release_alloc()
147 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); in qxl_release_alloc()
148 release->base.seqno = ++qdev->release_seqno; in qxl_release_alloc()
149 spin_unlock(&qdev->release_idr_lock); in qxl_release_alloc()
157 QXL_INFO(qdev, "allocated release %d\n", handle); in qxl_release_alloc()
179 qxl_release_free(struct qxl_device *qdev, in qxl_release_free() argument
182 QXL_INFO(qdev, "release %d, type %d\n", release->id, in qxl_release_free()
186 qxl_surface_id_dealloc(qdev, release->surface_release_id); in qxl_release_free()
188 spin_lock(&qdev->release_idr_lock); in qxl_release_free()
189 idr_remove(&qdev->release_idr, release->id); in qxl_release_free()
190 spin_unlock(&qdev->release_idr_lock); in qxl_release_free()
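qxl_release_alloc() and qxl_release_free() above manage release ids through an IDR guarded by release_idr_lock, using GFP_NOWAIT so the allocation is safe under the spinlock and starting ids at 1 so that 0 can mean "no release". A reduced sketch of that handle lifecycle follows; struct qxl_device is trimmed to the fields used here and the helper names are illustrative.

#include <linux/idr.h>
#include <linux/spinlock.h>

struct qxl_device {
	struct idr release_idr;
	spinlock_t release_idr_lock;
	u32 release_seqno;
};

static int release_handle_alloc(struct qxl_device *qdev, void *release)
{
	int handle;

	spin_lock(&qdev->release_idr_lock);
	/* GFP_NOWAIT: no sleeping allocation while the spinlock is held */
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	/* the driver also bumps the sequence number here and stores it
	 * in the release's embedded fence (release->base.seqno) */
	qdev->release_seqno++;
	spin_unlock(&qdev->release_idr_lock);
	return handle;	/* id >= 1, or a negative errno */
}

static void release_handle_free(struct qxl_device *qdev, int handle)
{
	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, handle);
	spin_unlock(&qdev->release_idr_lock);
}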
204 static int qxl_release_bo_alloc(struct qxl_device *qdev, in qxl_release_bo_alloc() argument
208 return qxl_bo_create(qdev, PAGE_SIZE, false, true, in qxl_release_bo_alloc()
293 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, in qxl_alloc_surface_release_reserved() argument
305 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); in qxl_alloc_surface_release_reserved()
314 info = qxl_release_map(qdev, *release); in qxl_alloc_surface_release_reserved()
316 qxl_release_unmap(qdev, *release, info); in qxl_alloc_surface_release_reserved()
320 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), in qxl_alloc_surface_release_reserved()
324 int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, in qxl_alloc_release_reserved() argument
345 idr_ret = qxl_release_alloc(qdev, type, release); in qxl_alloc_release_reserved()
352 mutex_lock(&qdev->release_mutex); in qxl_alloc_release_reserved()
353 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { in qxl_alloc_release_reserved()
354 qxl_bo_unref(&qdev->current_release_bo[cur_idx]); in qxl_alloc_release_reserved()
355 qdev->current_release_bo_offset[cur_idx] = 0; in qxl_alloc_release_reserved()
356 qdev->current_release_bo[cur_idx] = NULL; in qxl_alloc_release_reserved()
358 if (!qdev->current_release_bo[cur_idx]) { in qxl_alloc_release_reserved()
359 ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]); in qxl_alloc_release_reserved()
361 mutex_unlock(&qdev->release_mutex); in qxl_alloc_release_reserved()
362 qxl_release_free(qdev, *release); in qxl_alloc_release_reserved()
367 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); in qxl_alloc_release_reserved()
369 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; in qxl_alloc_release_reserved()
370 qdev->current_release_bo_offset[cur_idx]++; in qxl_alloc_release_reserved()
375 mutex_unlock(&qdev->release_mutex); in qxl_alloc_release_reserved()
380 qxl_release_free(qdev, *release); in qxl_alloc_release_reserved()
384 info = qxl_release_map(qdev, *release); in qxl_alloc_release_reserved()
386 qxl_release_unmap(qdev, *release, info); in qxl_alloc_release_reserved()
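qxl_alloc_release_reserved() above carves releases out of a shared, page-sized buffer object: a cursor protected by release_mutex hands out fixed-size slots, release_offset is the slot index times the per-slot size, and a fresh BO replaces the current one once it fills up. A simplified sketch of that slot reservation follows, assuming a placeholder SLOT_SIZE and a stand-in alloc_backing_bo() helper instead of the driver's qxl_release_bo_alloc()/qxl_bo_ref()/qxl_bo_unref() handling.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mutex.h>

#define SLOT_SIZE	64			/* placeholder, not the driver's per-type size */
#define SLOTS_PER_BO	(PAGE_SIZE / SLOT_SIZE)

struct release_pool {
	struct mutex lock;			/* plays the role of qdev->release_mutex */
	void *current_bo;			/* stand-in for qdev->current_release_bo[cur_idx] */
	unsigned int next_slot;			/* stand-in for current_release_bo_offset[cur_idx] */
};

static void *alloc_backing_bo(void)
{
	/* placeholder for qxl_release_bo_alloc(); the real BO comes from qxl_bo_create() */
	return (void *)__get_free_page(GFP_KERNEL);
}

static int reserve_release_slot(struct release_pool *pool, unsigned long *offset)
{
	mutex_lock(&pool->lock);
	if (pool->next_slot + 1 >= SLOTS_PER_BO) {
		/* current BO is full: forget it and start over
		 * (the driver drops its reference with qxl_bo_unref() here) */
		pool->current_bo = NULL;
		pool->next_slot = 0;
	}
	if (!pool->current_bo) {
		pool->current_bo = alloc_backing_bo();
		if (!pool->current_bo) {
			mutex_unlock(&pool->lock);
			return -ENOMEM;
		}
	}
	/* byte offset of this release inside the shared BO */
	*offset = pool->next_slot * SLOT_SIZE;
	pool->next_slot++;
	mutex_unlock(&pool->lock);
	return 0;
}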
391 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, in qxl_release_from_id_locked() argument
396 spin_lock(&qdev->release_idr_lock); in qxl_release_from_id_locked()
397 release = idr_find(&qdev->release_idr, id); in qxl_release_from_id_locked()
398 spin_unlock(&qdev->release_idr_lock); in qxl_release_from_id_locked()
407 union qxl_release_info *qxl_release_map(struct qxl_device *qdev, in qxl_release_map() argument
415 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); in qxl_release_map()
422 void qxl_release_unmap(struct qxl_device *qdev, in qxl_release_unmap() argument
431 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); in qxl_release_unmap()
442 struct qxl_device *qdev; in qxl_release_fence_buffer_objects() local
451 qdev = container_of(bdev, struct qxl_device, mman.bdev); in qxl_release_fence_buffer_objects()
457 dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock, in qxl_release_fence_buffer_objects()
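The last lines show the two halves of the fence setup meeting: the seqno assigned in qxl_release_alloc() and the device-wide release_lock are handed to dma_fence_init(), which is what makes the container_of() recovery in qxl_fence_wait() possible. A reduced sketch follows, with a placeholder ops table standing in for the driver's qxl_fence_ops and fence_release() as an illustrative name.

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

struct qxl_device {
	spinlock_t release_lock;
	u32 release_seqno;
};

struct qxl_release {
	struct dma_fence base;	/* the fence is embedded at the start of the release */
	/* ... id, release_offset, bo list, etc. elided ... */
};

static const char *demo_get_driver_name(struct dma_fence *f)
{
	return "qxl-sketch";
}

static const char *demo_get_timeline_name(struct dma_fence *f)
{
	return "release";
}

/* placeholder for qxl_fence_ops; only the mandatory callbacks are filled in */
static const struct dma_fence_ops demo_fence_ops = {
	.get_driver_name = demo_get_driver_name,
	.get_timeline_name = demo_get_timeline_name,
};

static void fence_release(struct qxl_device *qdev, struct qxl_release *release,
			  u64 context)
{
	/* same lock that qxl_fence_wait() later turns back into qdev */
	dma_fence_init(&release->base, &demo_fence_ops, &qdev->release_lock,
		       context, ++qdev->release_seqno);
}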