/drivers/gpu/drm/

D | drm_auth.c |
    64   static bool drm_is_current_master_locked(struct drm_file *fpriv)  in drm_is_current_master_locked() argument
    66   lockdep_assert_once(lockdep_is_held(&fpriv->master_lookup_lock) ||  in drm_is_current_master_locked()
    67   lockdep_is_held(&fpriv->minor->dev->master_mutex));  in drm_is_current_master_locked()
    69   return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master;  in drm_is_current_master_locked()
    82   bool drm_is_current_master(struct drm_file *fpriv)  in drm_is_current_master() argument
    86   spin_lock(&fpriv->master_lookup_lock);  in drm_is_current_master()
    87   ret = drm_is_current_master_locked(fpriv);  in drm_is_current_master()
    88   spin_unlock(&fpriv->master_lookup_lock);  in drm_is_current_master()
    155  static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,  in drm_set_master() argument
    158  dev->master = drm_master_get(fpriv->master);  in drm_set_master()
    [all …]
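The drm_auth.c hits pair a `_locked` helper, which only asserts that the caller already holds one of the required locks, with a public wrapper that takes the lock around it. A minimal sketch of that idiom, under made-up `foo_*` names and with a single spinlock instead of DRM's either-of-two-locks rule:

```c
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_file {
	spinlock_t lookup_lock;
	bool is_master;
};

/* Callers must already hold lookup_lock; lockdep documents and checks it. */
static bool foo_is_master_locked(struct foo_file *fpriv)
{
	lockdep_assert_held(&fpriv->lookup_lock);

	return fpriv->is_master;
}

/* Public variant: take the lock, delegate to the _locked helper. */
static bool foo_is_master(struct foo_file *fpriv)
{
	bool ret;

	spin_lock(&fpriv->lookup_lock);
	ret = foo_is_master_locked(fpriv);
	spin_unlock(&fpriv->lookup_lock);

	return ret;
}
```

The split lets internal callers that already hold the lock skip the extra acquire while still getting a lockdep splat if the contract is violated.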
/drivers/gpu/drm/tegra/

D | uapi.c |
    79   struct tegra_drm_file *fpriv = file->driver_priv;  in tegra_drm_ioctl_channel_open() local
    109  err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),  in tegra_drm_ioctl_channel_open()
    135  struct tegra_drm_file *fpriv = file->driver_priv;  in tegra_drm_ioctl_channel_close() local
    139  mutex_lock(&fpriv->lock);  in tegra_drm_ioctl_channel_close()
    141  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_close()
    143  mutex_unlock(&fpriv->lock);  in tegra_drm_ioctl_channel_close()
    147  xa_erase(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_close()
    149  mutex_unlock(&fpriv->lock);  in tegra_drm_ioctl_channel_close()
    158  struct tegra_drm_file *fpriv = file->driver_priv;  in tegra_drm_ioctl_channel_map() local
    167  mutex_lock(&fpriv->lock);  in tegra_drm_ioctl_channel_map()
    [all …]
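The uapi.c hits are the lookup side of a per-file handle table: an xarray maps a small context id to a driver object, and a per-file mutex serialises the ioctls that use it. A rough sketch of the same shape with hypothetical `demo_*` names, assuming the xarray was initialised with `xa_init_flags(..., XA_FLAGS_ALLOC1)` as in tegra's drm.c below:

```c
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_context {
	int dummy;
};

/* Hypothetical per-open state: a handle table plus a lock for its users. */
struct demo_file {
	struct mutex lock;
	struct xarray contexts;		/* id -> struct demo_context */
};

/* "open" ioctl path: allocate an object and hand back an id >= 1. */
static int demo_context_open(struct demo_file *fpriv, u32 *id)
{
	struct demo_context *context;
	int err;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = xa_alloc(&fpriv->contexts, id, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		kfree(context);

	return err;
}

/* "close" ioctl path: look the id up under the lock, then tear it down. */
static int demo_context_close(struct demo_file *fpriv, u32 id)
{
	struct demo_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, id);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	xa_erase(&fpriv->contexts, id);
	mutex_unlock(&fpriv->lock);

	kfree(context);
	return 0;
}
```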
D | drm.c |
    105  struct tegra_drm_file *fpriv;  in tegra_drm_open() local
    107  fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);  in tegra_drm_open()
    108  if (!fpriv)  in tegra_drm_open()
    111  idr_init_base(&fpriv->legacy_contexts, 1);  in tegra_drm_open()
    112  xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);  in tegra_drm_open()
    113  xa_init(&fpriv->syncpoints);  in tegra_drm_open()
    114  mutex_init(&fpriv->lock);  in tegra_drm_open()
    115  filp->driver_priv = fpriv;  in tegra_drm_open()
    428  static int tegra_client_open(struct tegra_drm_file *fpriv,  in tegra_client_open() argument
    438  err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);  in tegra_client_open()
    [all …]
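tegra_drm_open() above is the allocation side: the DRM `.open` hook allocates the per-file structure, initialises its containers and lock, and parks it in `drm_file::driver_priv` so later ioctls can retrieve it. A sketch of that shape with invented `demo_*` names (the real driver also sets up a syncpoints xarray and takes device references):

```c
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

struct demo_drm_file {
	struct idr legacy_contexts;
	struct xarray contexts;
	struct mutex lock;
};

/* .open: one demo_drm_file per open()ed DRM file descriptor. */
static int demo_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct demo_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->legacy_contexts, 1);	/* ids start at 1 */
	xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
	mutex_init(&fpriv->lock);

	filp->driver_priv = fpriv;
	return 0;
}

/* .postclose: tear the per-file state back down. */
static void demo_drm_postclose(struct drm_device *drm, struct drm_file *filp)
{
	struct demo_drm_file *fpriv = filp->driver_priv;

	idr_destroy(&fpriv->legacy_contexts);
	xa_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}
```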
D | submit.c |
    487  struct tegra_drm_file *fpriv = file->driver_priv;  in tegra_drm_ioctl_channel_submit() local
    497  mutex_lock(&fpriv->lock);  in tegra_drm_ioctl_channel_submit()
    499  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_submit()
    501  mutex_unlock(&fpriv->lock);  in tegra_drm_ioctl_channel_submit()
    551  job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);  in tegra_drm_ioctl_channel_submit()
    625  mutex_unlock(&fpriv->lock);  in tegra_drm_ioctl_channel_submit()
/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_bo_list.c |
    158  static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)  in amdgpu_bo_list_destroy() argument
    162  mutex_lock(&fpriv->bo_list_lock);  in amdgpu_bo_list_destroy()
    163  list = idr_remove(&fpriv->bo_list_handles, id);  in amdgpu_bo_list_destroy()
    164  mutex_unlock(&fpriv->bo_list_lock);  in amdgpu_bo_list_destroy()
    169  int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,  in amdgpu_bo_list_get() argument
    173  *result = idr_find(&fpriv->bo_list_handles, id);  in amdgpu_bo_list_get()
    270  struct amdgpu_fpriv *fpriv = filp->driver_priv;  in amdgpu_bo_list_ioctl() local
    288  mutex_lock(&fpriv->bo_list_lock);  in amdgpu_bo_list_ioctl()
    289  r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);  in amdgpu_bo_list_ioctl()
    290  mutex_unlock(&fpriv->bo_list_lock);  in amdgpu_bo_list_ioctl()
    [all …]
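Here the per-file handle table is an IDR rather than an xarray: handles come from idr_alloc() under `bo_list_lock` and go away with idr_remove(). A compact sketch of that pattern with hypothetical `demo_*` names:

```c
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

/* Hypothetical per-file handle table guarded by a mutex;
 * the idr is assumed to be idr_init_base(&handles, 1)'d at open time. */
struct demo_fpriv {
	struct mutex handle_lock;
	struct idr handles;
};

/* Publish an object and return its handle (>= 1), or a negative errno. */
static int demo_handle_create(struct demo_fpriv *fpriv, void *obj)
{
	int id;

	mutex_lock(&fpriv->handle_lock);
	id = idr_alloc(&fpriv->handles, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->handle_lock);

	return id;
}

/* Drop a handle and hand the object back to the caller, if it existed. */
static void *demo_handle_destroy(struct demo_fpriv *fpriv, int id)
{
	void *obj;

	mutex_lock(&fpriv->handle_lock);
	obj = idr_remove(&fpriv->handles, id);
	mutex_unlock(&fpriv->handle_lock);

	return obj;
}
```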
D | amdgpu_fdinfo.c |
    57   struct amdgpu_fpriv *fpriv;  in amdgpu_show_fdinfo() local
    65   ret = amdgpu_file_to_fpriv(f, &fpriv);  in amdgpu_show_fdinfo()
    73   root = amdgpu_bo_ref(fpriv->vm.root.bo);  in amdgpu_show_fdinfo()
    82   amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);  in amdgpu_show_fdinfo()
    87   dev, fn, fpriv->vm.pasid);  in amdgpu_show_fdinfo()
    98   total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr,  in amdgpu_show_fdinfo()
D | amdgpu_sched.c |
    68   struct amdgpu_fpriv *fpriv;  in amdgpu_sched_process_priority_override() local
    77   r = amdgpu_file_to_fpriv(f.file, &fpriv);  in amdgpu_sched_process_priority_override()
    83   mgr = &fpriv->ctx_mgr;  in amdgpu_sched_process_priority_override()
    99   struct amdgpu_fpriv *fpriv;  in amdgpu_sched_context_priority_override() local
    106  r = amdgpu_file_to_fpriv(f.file, &fpriv);  in amdgpu_sched_context_priority_override()
    112  ctx = amdgpu_ctx_get(fpriv, ctx_id);  in amdgpu_sched_context_priority_override()
D | amdgpu_ctx.c |
    235  struct amdgpu_fpriv *fpriv,  in amdgpu_ctx_alloc() argument
    240  struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;  in amdgpu_ctx_alloc()
    285  static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)  in amdgpu_ctx_free() argument
    287  struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;  in amdgpu_ctx_free()
    299  struct amdgpu_fpriv *fpriv, uint32_t id,  in amdgpu_ctx_query() argument
    306  if (!fpriv)  in amdgpu_ctx_query()
    309  mgr = &fpriv->ctx_mgr;  in amdgpu_ctx_query()
    337  struct amdgpu_fpriv *fpriv, uint32_t id,  in amdgpu_ctx_query2() argument
    344  if (!fpriv)  in amdgpu_ctx_query2()
    347  mgr = &fpriv->ctx_mgr;  in amdgpu_ctx_query2()
    [all …]
D | amdgpu_kms.c |
    1168  struct amdgpu_fpriv *fpriv;  in amdgpu_driver_open_kms() local
    1186  fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);  in amdgpu_driver_open_kms()
    1187  if (unlikely(!fpriv)) {  in amdgpu_driver_open_kms()
    1198  r = amdgpu_vm_init(adev, &fpriv->vm);  in amdgpu_driver_open_kms()
    1202  r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);  in amdgpu_driver_open_kms()
    1206  fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);  in amdgpu_driver_open_kms()
    1207  if (!fpriv->prt_va) {  in amdgpu_driver_open_kms()
    1215  r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,  in amdgpu_driver_open_kms()
    1216  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);  in amdgpu_driver_open_kms()
    1221  mutex_init(&fpriv->bo_list_lock);  in amdgpu_driver_open_kms()
    [all …]
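amdgpu_driver_open_kms() chains several per-file initialisation steps (VM init, PASID, PRT and CSA mappings, locks); open paths of this shape conventionally unwind already-completed steps with goto labels when a later step fails. A skeleton of that structure with stand-in `demo_*` helpers, not the actual amdgpu calls:

```c
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_fpriv {
	int vm;		/* placeholders for the real per-file objects */
	int csa;
};

static int demo_vm_init(struct demo_fpriv *fpriv) { return 0; }
static void demo_vm_fini(struct demo_fpriv *fpriv) { }
static int demo_csa_map(struct demo_fpriv *fpriv) { return 0; }

static int demo_open(void **driver_priv)
{
	struct demo_fpriv *fpriv;
	int r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	r = demo_vm_init(fpriv);
	if (r)
		goto error_free;

	r = demo_csa_map(fpriv);
	if (r)
		goto error_vm;

	*driver_priv = fpriv;
	return 0;

error_vm:
	demo_vm_fini(fpriv);
error_free:
	kfree(fpriv);
	return r;
}
```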
D | amdgpu_cs.c |
    98   struct amdgpu_fpriv *fpriv = p->filp->driver_priv;  in amdgpu_cs_parser_init() local
    99   struct amdgpu_vm *vm = &fpriv->vm;  in amdgpu_cs_parser_init()
    114  p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);  in amdgpu_cs_parser_init()
    484  struct amdgpu_fpriv *fpriv = p->filp->driver_priv;  in amdgpu_cs_parser_bos() local
    485  struct amdgpu_vm *vm = &fpriv->vm;  in amdgpu_cs_parser_bos()
    500  r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,  in amdgpu_cs_parser_bos()
    519  amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);  in amdgpu_cs_parser_bos()
    584  r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,  in amdgpu_cs_parser_bos()
    640  struct amdgpu_fpriv *fpriv = p->filp->driver_priv;  in amdgpu_cs_sync_rings() local
    652  &fpriv->vm);  in amdgpu_cs_sync_rings()
    [all …]
D | amdgpu_gem.c |
    162  struct amdgpu_fpriv *fpriv = file_priv->driver_priv;  in amdgpu_gem_object_open() local
    163  struct amdgpu_vm *vm = &fpriv->vm;  in amdgpu_gem_object_open()
    195  struct amdgpu_fpriv *fpriv = file_priv->driver_priv;  in amdgpu_gem_object_close() local
    196  struct amdgpu_vm *vm = &fpriv->vm;  in amdgpu_gem_object_close()
    288  struct amdgpu_fpriv *fpriv = filp->driver_priv;  in amdgpu_gem_create_ioctl() local
    289  struct amdgpu_vm *vm = &fpriv->vm;  in amdgpu_gem_create_ioctl()
    671  struct amdgpu_fpriv *fpriv = filp->driver_priv;  in amdgpu_gem_va_ioctl() local
    746  amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);  in amdgpu_gem_va_ioctl()
    753  bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);  in amdgpu_gem_va_ioctl()
    759  bo_va = fpriv->prt_va;  in amdgpu_gem_va_ioctl()
    [all …]
D | amdgpu_bo_list.h |
    53   int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
D | amdgpu_ctx.h |
    67   struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
D | amdgpu_drv.c |
    2524  struct amdgpu_fpriv *fpriv = file_priv->driver_priv;  in amdgpu_flush() local
    2527  timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);  in amdgpu_flush()
    2528  timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);  in amdgpu_flush()
    2550  int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)  in amdgpu_file_to_fpriv() argument
    2562  *fpriv = file->driver_priv;  in amdgpu_file_to_fpriv()
D | amdgpu_vm.c |
    3285  struct amdgpu_fpriv *fpriv = filp->driver_priv;  in amdgpu_vm_ioctl() local
    3296  r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,  in amdgpu_vm_ioctl()
    3308  r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);  in amdgpu_vm_ioctl()
    3312  r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);  in amdgpu_vm_ioctl()
    3316  amdgpu_bo_unreserve(fpriv->vm.root.bo);  in amdgpu_vm_ioctl()
    3317  amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);  in amdgpu_vm_ioctl()
/drivers/pci/

D | proc.c |
    201  struct pci_filp_private *fpriv = file->private_data;  in proc_bus_pci_ioctl() local
    218  fpriv->mmap_state = pci_mmap_io;  in proc_bus_pci_ioctl()
    222  fpriv->mmap_state = pci_mmap_mem;  in proc_bus_pci_ioctl()
    228  fpriv->write_combine = 1;  in proc_bus_pci_ioctl()
    230  fpriv->write_combine = 0;  in proc_bus_pci_ioctl()
    248  struct pci_filp_private *fpriv = file->private_data;  in proc_bus_pci_mmap() local
    255  if (fpriv->mmap_state == pci_mmap_io) {  in proc_bus_pci_mmap()
    271  if (fpriv->mmap_state == pci_mmap_mem &&  in proc_bus_pci_mmap()
    272  fpriv->write_combine) {  in proc_bus_pci_mmap()
    284  fpriv->mmap_state, write_combine);  in proc_bus_pci_mmap()
    [all …]
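proc_bus_pci_ioctl() records per-open mmap preferences (I/O vs. memory space, write-combining) in `file->private_data`, and proc_bus_pci_mmap() consults them when the mapping is actually created. A generic sketch of that ioctl-then-mmap handshake, with an invented `demo_*` structure and ioctl numbers, and the remapping itself left out:

```c
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

enum demo_mmap_state { DEMO_MMAP_IO, DEMO_MMAP_MEM };

/* Hypothetical per-open state, stashed in file->private_data at open time. */
struct demo_filp_private {
	enum demo_mmap_state mmap_state;
	bool write_combine;
};

#define DEMO_IOC_MMAP_MEM	0x1001	/* made-up ioctl numbers */
#define DEMO_IOC_MMAP_MEM_WC	0x1002

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct demo_filp_private *fpriv = file->private_data;

	switch (cmd) {
	case DEMO_IOC_MMAP_MEM:
		fpriv->mmap_state = DEMO_MMAP_MEM;
		fpriv->write_combine = false;
		return 0;
	case DEMO_IOC_MMAP_MEM_WC:
		fpriv->mmap_state = DEMO_MMAP_MEM;
		fpriv->write_combine = true;
		return 0;
	default:
		return -ENOTTY;
	}
}

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_filp_private *fpriv = file->private_data;

	/* Pick page protections based on what the earlier ioctl asked for. */
	if (fpriv->mmap_state == DEMO_MMAP_MEM && fpriv->write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* The actual io_remap_pfn_range() call is omitted in this sketch. */
	return 0;
}
```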
/drivers/char/tpm/

D | tpmrm-dev.c |
    37   struct file_priv *fpriv = file->private_data;  in tpmrm_release() local
    38   struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);  in tpmrm_release()
    40   tpm_common_release(file, fpriv);  in tpmrm_release()
    41   tpm2_del_space(fpriv->chip, &priv->space);  in tpmrm_release()
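tpmrm_release() recovers the wrapping `tpmrm_priv` from the embedded common `file_priv` with container_of(). The embed-and-recover idiom in isolation, with hypothetical `demo_*` structures:

```c
#include <linux/kernel.h>	/* container_of() */

struct demo_common_priv {
	int id;				/* state shared by all variants */
};

struct demo_rm_priv {
	struct demo_common_priv priv;	/* embedded common part */
	int space;			/* variant-specific state */
};

/* Given a pointer to the embedded member, recover the outer structure. */
static struct demo_rm_priv *to_demo_rm_priv(struct demo_common_priv *fpriv)
{
	return container_of(fpriv, struct demo_rm_priv, priv);
}
```

Because `file->private_data` points at the embedded common part, shared helpers keep working on it while variant-specific code uses container_of() to reach its own fields.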
/drivers/gpu/drm/i915/gem/

D | i915_gem_context.c |
    276  static int proto_context_register_locked(struct drm_i915_file_private *fpriv,  in proto_context_register_locked() argument
    283  lockdep_assert_held(&fpriv->proto_context_lock);  in proto_context_register_locked()
    285  ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);  in proto_context_register_locked()
    289  old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);  in proto_context_register_locked()
    291  xa_erase(&fpriv->context_xa, *id);  in proto_context_register_locked()
    299  static int proto_context_register(struct drm_i915_file_private *fpriv,  in proto_context_register() argument
    305  mutex_lock(&fpriv->proto_context_lock);  in proto_context_register()
    306  ret = proto_context_register_locked(fpriv, pc, id);  in proto_context_register()
    307  mutex_unlock(&fpriv->proto_context_lock);  in proto_context_register()
    312  static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,  in set_proto_ctx_vm() argument
    [all …]
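proto_context_register_locked() reserves an id in `context_xa` with a NULL entry, publishes the proto-context in `proto_context_xa` under that id, and drops the reservation again if the store fails; a mutex-taking wrapper sits on top. A sketch of that reserve-then-publish pattern with generic `demo_*` names:

```c
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

/* Hypothetical per-file state: one xarray hands out ids, the other holds
 * the objects that are actually visible to lookups. */
struct demo_file_private {
	struct mutex reg_lock;
	struct xarray ids;	/* XA_FLAGS_ALLOC: id reservations */
	struct xarray objects;	/* id -> published object */
};

static int demo_register_locked(struct demo_file_private *fpriv,
				void *obj, u32 *id)
{
	void *old;
	int ret;

	lockdep_assert_held(&fpriv->reg_lock);

	/* Reserve an id without making anything visible yet. */
	ret = xa_alloc(&fpriv->ids, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	/* Publish the object under the reserved id. */
	old = xa_store(&fpriv->objects, *id, obj, GFP_KERNEL);
	if (xa_is_err(old)) {
		xa_erase(&fpriv->ids, *id);
		return xa_err(old);
	}

	return 0;
}

static int demo_register(struct demo_file_private *fpriv, void *obj, u32 *id)
{
	int ret;

	mutex_lock(&fpriv->reg_lock);
	ret = demo_register_locked(fpriv, obj, id);
	mutex_unlock(&fpriv->reg_lock);

	return ret;
}
```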
D | i915_gem_object.c |
    117  struct drm_i915_file_private *fpriv = file->driver_priv;  in i915_gem_close_object() local
    127  if (ctx && ctx->file_priv == fpriv) {  in i915_gem_close_object()
/drivers/gpu/drm/radeon/

D | radeon_kms.c |
    651  struct radeon_fpriv *fpriv;  in radeon_driver_open_kms() local
    666  fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);  in radeon_driver_open_kms()
    667  if (unlikely(!fpriv)) {  in radeon_driver_open_kms()
    673  vm = &fpriv->vm;  in radeon_driver_open_kms()
    698  file_priv->driver_priv = fpriv;  in radeon_driver_open_kms()
    708  kfree(fpriv);  in radeon_driver_open_kms()
    744  struct radeon_fpriv *fpriv = file_priv->driver_priv;  in radeon_driver_postclose_kms() local
    745  struct radeon_vm *vm = &fpriv->vm;  in radeon_driver_postclose_kms()
    758  kfree(fpriv);  in radeon_driver_postclose_kms()
D | radeon_gem.c |
    199  struct radeon_fpriv *fpriv = file_priv->driver_priv;  in radeon_gem_object_open() local
    200  struct radeon_vm *vm = &fpriv->vm;  in radeon_gem_object_open()
    230  struct radeon_fpriv *fpriv = file_priv->driver_priv;  in radeon_gem_object_close() local
    231  struct radeon_vm *vm = &fpriv->vm;  in radeon_gem_object_close()
    678  struct radeon_fpriv *fpriv = filp->driver_priv;  in radeon_gem_va_ioctl() local
    743  bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);  in radeon_gem_va_ioctl()
D | radeon_cs.c |
    547  struct radeon_fpriv *fpriv = parser->filp->driver_priv;  in radeon_cs_ib_vm_chunk() local
    548  struct radeon_vm *vm = &fpriv->vm;  in radeon_cs_ib_vm_chunk()
    616  struct radeon_fpriv *fpriv = parser->filp->driver_priv;  in radeon_cs_ib_fill() local
    617  vm = &fpriv->vm;  in radeon_cs_ib_fill()
/drivers/gpu/drm/i915/gem/selftests/

D | mock_context.c |
    79   struct drm_i915_file_private *fpriv = to_drm_file(file)->driver_priv;  in live_context() local
    96   err = xa_alloc(&fpriv->context_xa, &id, NULL, xa_limit_32b, GFP_KERNEL);  in live_context()
    100  gem_context_register(ctx, fpriv, id);  in live_context()
/drivers/gpu/drm/nouveau/

D | nouveau_drv.h |
    126  nouveau_cli(struct drm_file *fpriv)  in nouveau_cli() argument
    128  return fpriv ? fpriv->driver_priv : NULL;  in nouveau_cli()
D | nouveau_drm.c |
    1092  nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)  in nouveau_drm_open() argument
    1107  snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));  in nouveau_drm_open()
    1118  fpriv->driver_priv = cli;  in nouveau_drm_open()
    1136  nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)  in nouveau_drm_postclose() argument
    1138  struct nouveau_cli *cli = nouveau_cli(fpriv);  in nouveau_drm_postclose()