/kernel/linux/linux-5.10/drivers/gpu/drm/qxl/ |
D | qxl_object.c |
     30  static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)  in qxl_ttm_bo_destroy() argument
     35  bo = to_qxl_bo(tbo);  in qxl_ttm_bo_destroy()
     36  qdev = to_qxl(bo->tbo.base.dev);  in qxl_ttm_bo_destroy()
     43  drm_gem_object_release(&bo->tbo.base);  in qxl_ttm_bo_destroy()
     62  if (qbo->tbo.base.size <= PAGE_SIZE)  in qxl_ttm_placement_from_domain()
    124  r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);  in qxl_bo_create()
    129  bo->tbo.base.funcs = &qxl_object_funcs;  in qxl_bo_create()
    140  bo->tbo.priority = priority;  in qxl_bo_create()
    141  r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,  in qxl_bo_create()
    166  r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);  in qxl_bo_kmap()
    [all …]
|
D | qxl_object.h |
     34  r = ttm_bo_reserve(&bo->tbo, true, false, NULL);  in qxl_bo_reserve()
     37  struct drm_device *ddev = bo->tbo.base.dev;  in qxl_bo_reserve()
     48  ttm_bo_unreserve(&bo->tbo);  in qxl_bo_unreserve()
     53  return bo->tbo.num_pages << PAGE_SHIFT;  in qxl_bo_size()
     58  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);  in qxl_bo_mmap_offset()
     66  r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);  in qxl_bo_wait()
     69  struct drm_device *ddev = bo->tbo.base.dev;  in qxl_bo_wait()
     77  *mem_type = bo->tbo.mem.mem_type;  in qxl_bo_wait()
     79  r = ttm_bo_wait(&bo->tbo, true, no_wait);  in qxl_bo_wait()
     80  ttm_bo_unreserve(&bo->tbo);  in qxl_bo_wait()
|
D | qxl_gem.c |
     35  struct ttm_buffer_object *tbo;  in qxl_gem_object_free() local
     41  tbo = &qobj->tbo;  in qxl_gem_object_free()
     42  ttm_bo_put(tbo);  in qxl_gem_object_free()
     66  *obj = &qbo->tbo.base;  in qxl_gem_object_create()
|
D | qxl_release.c |
    215  if (entry->tv.bo == &bo->tbo)  in qxl_release_list_add()
    224  entry->tv.bo = &bo->tbo;  in qxl_release_list_add()
    237  ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in qxl_release_validate_bo()
    242  ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);  in qxl_release_validate_bo()
    247  ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);  in qxl_release_validate_bo()
|
D | qxl_drv.h |
     75  struct ttm_buffer_object tbo;  member
     97  #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
     98  #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
    306  (bo->tbo.mem.mem_type == TTM_PL_VRAM)  in qxl_bo_physical_address()
    311  return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset);  in qxl_bo_physical_address()
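
The qxl_drv.h hits show the pattern every file in this listing relies on: the driver's buffer object embeds struct ttm_buffer_object by value as its tbo member, and container_of() walks back from either the TTM object or its embedded GEM base to the wrapper. Below is a minimal sketch of that layout; the demo_bo type and its helpers are hypothetical, invented here for illustration, not taken from any of these drivers.

    #include <linux/kernel.h>           /* container_of() */
    #include <drm/drm_gem.h>
    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_placement.h>

    /* Hypothetical driver BO mirroring qxl_bo/amdgpu_bo/radeon_bo: the TTM
     * object is embedded by value, never pointed to, so container_of() works. */
    struct demo_bo {
            struct ttm_placement            placement;
            struct ttm_buffer_object        tbo;    /* owns the GEM base object */
    };

    /* TTM callbacks hand back &demo_bo->tbo; recover the wrapper, as
     * to_qxl_bo() above and ttm_to_amdgpu_bo() below do. */
    static inline struct demo_bo *to_demo_bo(struct ttm_buffer_object *tbo)
    {
            return container_of(tbo, struct demo_bo, tbo);
    }

    /* GEM callbacks hand back &demo_bo->tbo.base; same trick one member
     * deeper, as gem_to_qxl_bo() above does. */
    static inline struct demo_bo *gem_to_demo_bo(struct drm_gem_object *gobj)
    {
            return container_of(gobj, struct demo_bo, tbo.base);
    }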
|
D | qxl_debugfs.c |
     64  fobj = rcu_dereference(bo->tbo.base.resv->fence);  in qxl_debugfs_buffers_info()
     69  (unsigned long)bo->tbo.base.size,  in qxl_debugfs_buffers_info()
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_object.c |
     65  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_bo_subtract_pin_size()
     67  if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {  in amdgpu_bo_subtract_pin_size()
     71  } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {  in amdgpu_bo_subtract_pin_size()
     76  static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)  in amdgpu_bo_destroy() argument
     78  struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);  in amdgpu_bo_destroy()
     79  struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);  in amdgpu_bo_destroy()
     86  if (bo->tbo.base.import_attach)  in amdgpu_bo_destroy()
     87  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);  in amdgpu_bo_destroy()
     88  drm_gem_object_release(&bo->tbo.base);  in amdgpu_bo_destroy()
    128  struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);  in amdgpu_bo_placement_from_domain()
    [all …]
|
D | amdgpu_dma_buf.c |
     58  ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,  in amdgpu_gem_prime_vmap()
     95  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_gem_prime_mmap()
    109  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||  in amdgpu_gem_prime_mmap()
    181  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_dma_buf_attach()
    202  r = __dma_resv_make_exclusive(bo->tbo.base.resv);  in amdgpu_dma_buf_attach()
    224  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_dma_buf_detach()
    248  if (bo->tbo.moving) {  in amdgpu_dma_buf_pin()
    249  r = dma_fence_wait(bo->tbo.moving, true);  in amdgpu_dma_buf_pin()
    292  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_dma_buf_map()
    307  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_dma_buf_map()
    [all …]
|
D | amdgpu_object.h |
     89  struct ttm_buffer_object tbo;  member
    117  static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)  in ttm_to_amdgpu_bo() argument
    119  return container_of(tbo, struct amdgpu_bo, tbo);  in ttm_to_amdgpu_bo()
    160  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_bo_reserve()
    163  r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);  in amdgpu_bo_reserve()
    174  ttm_bo_unreserve(&bo->tbo);  in amdgpu_bo_unreserve()
    179  return bo->tbo.num_pages << PAGE_SHIFT;  in amdgpu_bo_size()
    184  return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;  in amdgpu_bo_ngpu_pages()
    189  return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;  in amdgpu_bo_gpu_page_alignment()
    200  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);  in amdgpu_bo_mmap_offset()
    [all …]
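
The amdgpu_object.h helpers follow directly from that embedding: locking, size, and mmap-offset queries all delegate to the tbo member instead of caching state in the wrapper. A hedged sketch of the reserve/unreserve pair, continuing the hypothetical demo_bo from the qxl_drv.h note above (the error message is illustrative):

    #include <drm/ttm/ttm_bo_driver.h>  /* ttm_bo_reserve()/ttm_bo_unreserve() */

    /* Serialize access via the embedded TTM object's reservation lock,
     * mirroring amdgpu_bo_reserve()/amdgpu_bo_unreserve() above. */
    static inline int demo_bo_reserve(struct demo_bo *bo, bool no_intr)
    {
            int r;

            r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
            if (unlikely(r != 0)) {
                    /* -ERESTARTSYS only means a signal interrupted the wait */
                    if (r != -ERESTARTSYS)
                            pr_err("demo: bo reserve failed (%d)\n", r);
                    return r;
            }
            return 0;
    }

    static inline void demo_bo_unreserve(struct demo_bo *bo)
    {
            ttm_bo_unreserve(&bo->tbo);
    }

    /* Byte size falls out of the TTM page count, as amdgpu_bo_size() above. */
    static inline unsigned long demo_bo_size(struct demo_bo *bo)
    {
            return bo->tbo.num_pages << PAGE_SHIFT;
    }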
|
D | amdgpu_gem.c |
     75  *obj = &bo->tbo.base;  in amdgpu_gem_object_create()
    112  struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);  in amdgpu_gem_object_open()
    119  mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);  in amdgpu_gem_object_open()
    124  abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)  in amdgpu_gem_object_open()
    145  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_gem_object_close()
    160  tv.bo = &bo->tbo;  in amdgpu_gem_object_close()
    180  fence = dma_resv_get_excl(bo->tbo.base.resv);  in amdgpu_gem_object_close()
    255  resv = vm->root.base.bo->tbo.base.resv;  in amdgpu_gem_create_ioctl()
    340  r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);  in amdgpu_gem_userptr_ioctl()
    349  r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);  in amdgpu_gem_userptr_ioctl()
    [all …]
|
D | amdgpu_amdkfd_gpuvm.c |
    200  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_amdkfd_unreserve_memory_limit()
    225  struct dma_resv *resv = bo->tbo.base.resv;  in amdgpu_amdkfd_remove_eviction_fence()
    304  BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));  in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
    306  dma_resv_unlock(bo->tbo.base.resv);  in amdgpu_amdkfd_remove_fence_on_pt_pd_bos()
    318  if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),  in amdgpu_amdkfd_bo_validate()
    324  ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_amdkfd_bo_validate()
    349  struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);  in vm_validate_pt_pd_bos()
    380  struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);  in vm_update_pds()
    392  struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);  in get_pte_flags()
    444  unsigned long bo_size = bo->tbo.mem.size;  in add_bo_to_vm()
    [all …]
|
D | amdgpu_gtt_mgr.c |
     34  struct ttm_buffer_object *tbo;  member
    177  struct ttm_buffer_object *tbo,  in amdgpu_gtt_mgr_new() argument
    186  if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&  in amdgpu_gtt_mgr_new()
    206  node->tbo = tbo;  in amdgpu_gtt_mgr_new()
    280  r = amdgpu_ttm_recover_gart(node->tbo);  in amdgpu_gtt_mgr_recover()
|
D | amdgpu_mn.c |
     68  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_mn_invalidate_gfx()
     78  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,  in amdgpu_mn_invalidate_gfx()
    105  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_mn_invalidate_hsa()
|
D | amdgpu_vm.c |
    229  if (bo->tbo.type == ttm_bo_type_kernel)  in amdgpu_vm_bo_evicted()
    331  if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)  in amdgpu_vm_bo_base_init()
    335  if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)  in amdgpu_vm_bo_base_init()
    341  amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))  in amdgpu_vm_bo_base_init()
    589  entry->tv.bo = &vm->root.base.bo->tbo;  in amdgpu_vm_get_pd_bo()
    621  if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)  in amdgpu_vm_del_from_lru_notify()
    656  ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);  in amdgpu_vm_move_to_lru_tail()
    658  ttm_bo_move_to_lru_tail(&bo->shadow->tbo,  in amdgpu_vm_move_to_lru_tail()
    695  if (bo->tbo.type != ttm_bo_type_kernel) {  in amdgpu_vm_validate_pt_bos()
    788  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_vm_clear_bo()
    [all …]
|
D | amdgpu_gmc.c |
     47  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_gmc_get_pde_for_bo()
     50  switch (bo->tbo.mem.mem_type) {  in amdgpu_gmc_get_pde_for_bo()
     52  ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);  in amdgpu_gmc_get_pde_for_bo()
     62  *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);  in amdgpu_gmc_get_pde_for_bo()
     72  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_gmc_pd_addr()
|
D | amdgpu_cs.c |
     56  p->uf_entry.tv.bo = &bo->tbo;  in amdgpu_cs_user_fence_chunk()
     68  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {  in amdgpu_cs_user_fence_chunk()
    403  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  in amdgpu_cs_bo_validate()
    407  .resv = bo->tbo.base.resv,  in amdgpu_cs_bo_validate()
    420  (!bo->tbo.base.dma_buf ||  in amdgpu_cs_bo_validate()
    421  list_empty(&bo->tbo.base.dma_buf->attachments))) {  in amdgpu_cs_bo_validate()
    441  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_cs_bo_validate()
    482  usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);  in amdgpu_cs_list_validate()
    486  if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&  in amdgpu_cs_list_validate()
    490  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in amdgpu_cs_list_validate()
    [all …]
|
D | amdgpu_vm_cpu.c |
     79  if (bo->tbo.moving) {  in amdgpu_vm_cpu_update()
     80  r = dma_fence_wait(bo->tbo.moving, true);  in amdgpu_vm_cpu_update()
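
tbo.moving carries the fence of an in-flight TTM move; the CPU page-table update path here, like amdgpu_dma_buf_pin() and radeon_gem_prime_pin() elsewhere in this listing, waits on it before touching the pages. A hedged sketch of that guard, again using the hypothetical demo_bo:

    #include <linux/dma-fence.h>

    /* Block until any pending TTM move of this BO completes, mirroring the
     * bo->tbo.moving checks in the hits above. Returns 0 or a -errno. */
    static int demo_bo_wait_for_move(struct demo_bo *bo, bool intr)
    {
            if (!bo->tbo.moving)
                    return 0;       /* no move in flight */
            return dma_fence_wait(bo->tbo.moving, intr);
    }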
|
D | amdgpu_ttm.c |
    177  return drm_vma_node_verify_access(&abo->tbo.base.vma_node,  in amdgpu_verify_access()
    836  struct ttm_tt *ttm = bo->tbo.ttm;  in amdgpu_ttm_tt_get_user_pages()
   1060  struct ttm_buffer_object *tbo,  in amdgpu_ttm_gart_bind() argument
   1063  struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);  in amdgpu_ttm_gart_bind()
   1064  struct ttm_tt *ttm = tbo->ttm;  in amdgpu_ttm_gart_bind()
   1229  int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)  in amdgpu_ttm_recover_gart() argument
   1231  struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);  in amdgpu_ttm_recover_gart()
   1235  if (!tbo->ttm)  in amdgpu_ttm_recover_gart()
   1238  flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);  in amdgpu_ttm_recover_gart()
   1239  r = amdgpu_ttm_gart_bind(adev, tbo, flags);  in amdgpu_ttm_recover_gart()
    [all …]
|
D | amdgpu_vm_sdma.c |
     40  r = amdgpu_ttm_alloc_gart(&table->tbo);  in amdgpu_vm_sdma_map_table()
     45  r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);  in amdgpu_vm_sdma_map_table()
    211  r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);  in amdgpu_vm_sdma_update()
|
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/ |
D | radeon_object.c |
     57  u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;  in radeon_update_memory_usage()
     75  static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)  in radeon_ttm_bo_destroy() argument
     79  bo = container_of(tbo, struct radeon_bo, tbo);  in radeon_ttm_bo_destroy()
     81  radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);  in radeon_ttm_bo_destroy()
     88  if (bo->tbo.base.import_attach)  in radeon_ttm_bo_destroy()
     89  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);  in radeon_ttm_bo_destroy()
     90  drm_gem_object_release(&bo->tbo.base);  in radeon_ttm_bo_destroy()
    212  drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);  in radeon_bo_create()
    262  r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,  in radeon_bo_create()
    287  r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);  in radeon_bo_kmap()
    [all …]
|
D | radeon_prime.c |
     37  int npages = bo->tbo.num_pages;  in radeon_gem_prime_get_sg_table()
     39  return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);  in radeon_gem_prime_get_sg_table()
     47  ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,  in radeon_gem_prime_vmap()
     83  return &bo->tbo.base;  in radeon_gem_prime_import_sg_table()
    100  if (bo->tbo.moving) {  in radeon_gem_prime_pin()
    101  ret = dma_fence_wait(bo->tbo.moving, false);  in radeon_gem_prime_pin()
    134  if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))  in radeon_gem_prime_export()
|
D | radeon_object.h |
     68  r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);  in radeon_bo_reserve()
     79  ttm_bo_unreserve(&bo->tbo);  in radeon_bo_unreserve()
     96  rdev = radeon_get_rdev(bo->tbo.bdev);  in radeon_bo_gpu_offset()
     98  switch (bo->tbo.mem.mem_type) {  in radeon_bo_gpu_offset()
    107  return (bo->tbo.mem.start << PAGE_SHIFT) + start;  in radeon_bo_gpu_offset()
    112  return bo->tbo.num_pages << PAGE_SHIFT;  in radeon_bo_size()
    117  return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;  in radeon_bo_ngpu_pages()
    122  return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;  in radeon_bo_gpu_page_alignment()
    133  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);  in radeon_bo_mmap_offset()
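
radeon_bo_gpu_offset() above computes a GPU address the same way qxl_bo_physical_address() does: take the placement's first page from tbo.mem.start, shift it to bytes, and add an aperture base chosen by mem_type. A sketch of the arithmetic; the base addresses are invented for illustration (real drivers read them from the device):

    /* Invented aperture bases, purely illustrative. */
    #define DEMO_VRAM_APERTURE_BASE 0x0ULL
    #define DEMO_GTT_APERTURE_BASE  0x100000000ULL

    static u64 demo_bo_gpu_offset(struct demo_bo *bo)
    {
            u64 base;

            switch (bo->tbo.mem.mem_type) {
            case TTM_PL_VRAM:
                    base = DEMO_VRAM_APERTURE_BASE;
                    break;
            case TTM_PL_TT:
                    base = DEMO_GTT_APERTURE_BASE;
                    break;
            default:
                    return 0;       /* system placement has no GPU address */
            }
            /* first page of the placement, in bytes, relative to its aperture */
            return base + ((u64)bo->tbo.mem.start << PAGE_SHIFT);
    }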
|
D | radeon_mn.c |
     56  if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))  in radeon_mn_invalidate()
     68  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,  in radeon_mn_invalidate()
     74  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in radeon_mn_invalidate()
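
The ttm_bo_validate(&bo->tbo, &bo->placement, &ctx) call seen here also appears in the qxl_release.c, amdgpu_cs.c, and amdgpu_amdkfd_gpuvm.c hits: (re)placing a BO always goes through the embedded TTM object under a ttm_operation_ctx that carries the wait policy. A minimal sketch under the same demo_bo assumption:

    /* Move/populate the BO to match bo->placement; the caller must hold
     * the reservation (see demo_bo_reserve() in the amdgpu_object.h note). */
    static int demo_bo_make_resident(struct demo_bo *bo)
    {
            struct ttm_operation_ctx ctx = {
                    .interruptible = true,  /* signals may abort the wait */
                    .no_wait_gpu = false,   /* OK to wait on GPU moves */
            };

            return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    }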
|
D | radeon_gem.c |
     87  *obj = &robj->tbo.base;  in radeon_gem_object_create()
    118  r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);  in radeon_gem_set_domain()
    334  r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);  in radeon_gem_userptr_ioctl()
    353  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);  in radeon_gem_userptr_ioctl()
    423  if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {  in radeon_mode_dumb_mmap()
    455  r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);  in radeon_gem_busy_ioctl()
    461  cur_placement = READ_ONCE(robj->tbo.mem.mem_type);  in radeon_gem_busy_ioctl()
    484  ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);  in radeon_gem_wait_idle_ioctl()
    491  cur_placement = READ_ONCE(robj->tbo.mem.mem_type);  in radeon_gem_wait_idle_ioctl()
    562  tv.bo = &bo_va->bo->tbo;  in radeon_gem_va_update_vm()
    [all …]
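
radeon_gem.c waits on the reservation object of the embedded GEM base, robj->tbo.base.resv, with a 30-second timeout before reporting a BO idle; the same dma_resv_wait_timeout_rcu() call shows up in radeon_mn.c and amdgpu_mn.c above. A hedged sketch of that wait (5.10-era API; later kernels renamed the helper dma_resv_wait_timeout()):

    #include <linux/dma-resv.h>
    #include <linux/jiffies.h>  /* HZ */

    /* Wait up to 30s for every fence on the BO to signal, the pattern of
     * radeon_gem_wait_idle_ioctl() above. Returns 0, -EBUSY on timeout,
     * or another -errno. */
    static int demo_bo_wait_idle(struct demo_bo *bo)
    {
            long r;

            r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
                                          true,     /* wait_all: shared fences too */
                                          true,     /* interruptible */
                                          30 * HZ);
            if (r == 0)
                    return -EBUSY;  /* timed out */
            return r < 0 ? r : 0;
    }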
|
D | radeon_cs.c |
    163  if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) {  in radeon_cs_parser_relocs()
    186  p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;  in radeon_cs_parser_relocs()
    260  resv = reloc->robj->tbo.base.resv;  in radeon_cs_sync_rings()
    404  return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;  in cmp_size_smaller_first()
    446  drm_gem_object_put(&bo->tbo.base);  in radeon_cs_parser_fini()
    518  &rdev->ring_tmp_bo.bo->tbo.mem);  in radeon_bo_vm_update_pte()
    532  r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);  in radeon_bo_vm_update_pte()
|