/drivers/gpu/drm/ttm/

ttm_tt.c
    53  if (bo->ttm)  in ttm_tt_create()
    77  bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);  in ttm_tt_create()
    78  if (unlikely(bo->ttm == NULL))  in ttm_tt_create()
    87  static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)  in ttm_tt_alloc_page_directory() argument
    89  ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),  in ttm_tt_alloc_page_directory()
    91  if (!ttm->pages)  in ttm_tt_alloc_page_directory()
    96  static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)  in ttm_dma_tt_alloc_page_directory() argument
    98  ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,  in ttm_dma_tt_alloc_page_directory()
    99  sizeof(*ttm->ttm.pages) +  in ttm_dma_tt_alloc_page_directory()
   100  sizeof(*ttm->dma_address),  in ttm_dma_tt_alloc_page_directory()
    [all …]
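
The two allocators above share one allocation for both arrays: pages[] and dma_address[] come out of a single kvmalloc_array() call, with the DMA addresses carved out behind the page pointers. A minimal sketch of that layout, assuming the pre-5.10 struct ttm_dma_tt shown in these excerpts (not a verbatim copy of ttm_tt.c):

/* Sketch only: mirrors the ttm_dma_tt_alloc_page_directory() pattern above.
 * Assumes the pre-5.10 struct ttm_dma_tt with pages[] and dma_address[] kept
 * in one allocation.
 */
#include <linux/mm.h>
#include <drm/ttm/ttm_tt.h>

static int example_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;

	/* dma_address[] sits directly behind the pages[] array. */
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}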

ttm_agp_backend.c
    46  struct ttm_tt ttm;  member
    51  int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)  in ttm_agp_bind() argument
    53  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_bind()
    63  mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);  in ttm_agp_bind()
    68  for (i = 0; i < ttm->num_pages; i++) {  in ttm_agp_bind()
    69  struct page *page = ttm->pages[i];  in ttm_agp_bind()
    89  void ttm_agp_unbind(struct ttm_tt *ttm)  in ttm_agp_unbind() argument
    91  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_unbind()
   104  bool ttm_agp_is_bound(struct ttm_tt *ttm)  in ttm_agp_is_bound() argument
   106  struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);  in ttm_agp_is_bound()
    [all …]
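
ttm_agp_backend.c shows the standard backend pattern: the driver embeds a struct ttm_tt inside its own type and recovers the private state with container_of() in every hook. A stripped-down illustration of that pattern (my_backend and its fields are made up for the example, not TTM API):

/* Illustration of the embed-and-container_of pattern above; "my_backend"
 * is a hypothetical driver type, not part of TTM.
 */
#include <linux/kernel.h>
#include <drm/ttm/ttm_tt.h>

struct my_backend {
	struct ttm_tt ttm;	/* handed to the TTM core */
	void *bridge;		/* driver-private state */
};

static inline struct my_backend *to_my_backend(struct ttm_tt *ttm)
{
	return container_of(ttm, struct my_backend, ttm);
}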

ttm_page_alloc.c
  1029  ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)  in ttm_pool_unpopulate_helper() argument
  1038  if (!ttm->pages[i])  in ttm_pool_unpopulate_helper()
  1041  ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);  in ttm_pool_unpopulate_helper()
  1045  ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,  in ttm_pool_unpopulate_helper()
  1046  ttm->caching_state);  in ttm_pool_unpopulate_helper()
  1047  ttm_tt_set_unpopulated(ttm);  in ttm_pool_unpopulate_helper()
  1050  int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)  in ttm_pool_populate() argument
  1056  if (ttm_tt_is_populated(ttm))  in ttm_pool_populate()
  1059  if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))  in ttm_pool_populate()
  1062  ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,  in ttm_pool_populate()
    [all …]
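
ttm_pool_populate() and the unpopulate helper are what drivers without DMA-address bookkeeping forward to from their own populate/unpopulate hooks. A hedged sketch of such a forwarding hook, assuming the pre-5.10 helpers listed above (SG special cases and error paths omitted):

/* Sketch: forwarding a driver populate hook to the page-pool helpers above. */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

static int example_ttm_populate(struct ttm_bo_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	if (ttm_tt_is_populated(ttm))
		return 0;

	return ttm_pool_populate(ttm, ctx);
}

static void example_ttm_unpopulate(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}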

ttm_bo_util.c
    57  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_move_ttm() local
    75  ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);  in ttm_bo_move_ttm()
    81  ret = ttm_tt_populate(bo->bdev, ttm, ctx);  in ttm_bo_move_ttm()
   173  static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,  in ttm_copy_io_ttm_page() argument
   177  struct page *d = ttm->pages[page];  in ttm_copy_io_ttm_page()
   195  static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,  in ttm_copy_ttm_io_page() argument
   199  struct page *s = ttm->pages[page];  in ttm_copy_ttm_io_page()
   223  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_move_memcpy() local
   255  (ttm == NULL || (!ttm_tt_is_populated(ttm) &&  in ttm_bo_move_memcpy()
   256  !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {  in ttm_bo_move_memcpy()
    [all …]

ttm_page_alloc_dma.c
   838  struct ttm_tt *ttm = &ttm_dma->ttm;  in ttm_dma_pool_get_pages() local
   846  ttm->pages[index] = d_page->p;  in ttm_dma_pool_get_pages()
   858  struct ttm_tt *ttm = &ttm_dma->ttm;  in ttm_dma_pool_gfp_flags() local
   861  if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)  in ttm_dma_pool_gfp_flags()
   865  if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)  in ttm_dma_pool_gfp_flags()
   875  if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)  in ttm_dma_pool_gfp_flags()
   889  struct ttm_tt *ttm = &ttm_dma->ttm;  in ttm_dma_populate() local
   890  unsigned long num_pages = ttm->num_pages;  in ttm_dma_populate()
   897  if (ttm_tt_is_populated(ttm))  in ttm_dma_populate()
   906  type = ttm_to_type(ttm->page_flags, ttm->caching_state);  in ttm_dma_populate()
    [all …]
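
The DMA pool does the same job but also fills ttm_dma_tt.dma_address[], which is why its entry points take a struct ttm_dma_tt and a struct device. A sketch of a driver hook built on it, assuming the driver embeds struct ttm_dma_tt as the radeon/amdgpu excerpts further down do (in real drivers the device pointer comes from the driver's private data, not an extra parameter):

/* Sketch: wiring the DMA page pool above into a driver populate hook. */
#include <linux/kernel.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

static int example_dma_tt_populate(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_operation_ctx *ctx,
				   struct device *dev)
{
	struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);

	return ttm_dma_populate(dma_tt, dev, ctx);
}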

Makefile
     5  ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
     9  ttm-$(CONFIG_AGP) += ttm_agp_backend.o
    10  ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
    12  obj-$(CONFIG_DRM_TTM) += ttm.o

ttm_bo_vm.c
   188  struct ttm_tt *ttm = bo->ttm;  in ttm_bo_vm_insert_huge() local
   199  pfn = page_to_pfn(ttm->pages[page_offset]);  in ttm_bo_vm_insert_huge()
   208  if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)  in ttm_bo_vm_insert_huge()
   277  struct ttm_tt *ttm = NULL;  in ttm_bo_vm_fault_reserved() local
   288  if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))  in ttm_bo_vm_fault_reserved()
   342  ttm = bo->ttm;  in ttm_bo_vm_fault_reserved()
   343  if (ttm_tt_populate(bdev, bo->ttm, &ctx))  in ttm_bo_vm_fault_reserved()
   363  page = ttm->pages[page_offset];  in ttm_bo_vm_fault_reserved()
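
The fault path shows the contract for CPU mappings: the TT must be populated before any ttm->pages[] dereference, and the faulting offset indexes straight into that array. A condensed sketch of that rule, with reservation, caching and the actual VM insertion left out; the helper name is invented for the example:

/* Sketch of the populate-then-index rule visible in ttm_bo_vm.c above.
 * Returns 0 on failure; the real fault handler maps that to a VM_FAULT code.
 */
#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static unsigned long example_bo_offset_to_pfn(struct ttm_buffer_object *bo,
					      pgoff_t page_offset,
					      struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = bo->ttm;

	if (!ttm || page_offset >= ttm->num_pages)
		return 0;

	if (ttm_tt_populate(bo->bdev, ttm, ctx))
		return 0;

	return page_to_pfn(ttm->pages[page_offset]);
}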

ttm_bo.c
   127  if (man->use_tt && bo->ttm &&  in ttm_bo_add_mem_to_lru()
   128  !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |  in ttm_bo_add_mem_to_lru()
   178  if (bo->ttm && !(bo->ttm->page_flags &  in ttm_bo_move_to_lru_tail()
   258  ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);  in ttm_bo_handle_move_mem()
   263  ret = ttm_tt_populate(bdev, bo->ttm, ctx);  in ttm_bo_handle_move_mem()
  1557  bo->ttm->caching_state != tt_cached) {  in ttm_bo_swapout()
  1589  ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);  in ttm_bo_swapout()
  1617  if (bo->ttm == NULL)  in ttm_bo_tt_destroy()
  1620  ttm_tt_destroy(bo->bdev, bo->ttm);  in ttm_bo_tt_destroy()
  1621  bo->ttm = NULL;  in ttm_bo_tt_destroy()
    [all …]
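
Two recurring guards show up here: a TT only goes on the swap LRU if it is neither SG-backed nor already swapped out, and only cached TTs are eligible for ttm_bo_swapout(). The first test written out as a predicate; the second half of the flag mask is cut off in the listing above, so its completion here is an assumption, not a quote:

/* Sketch of the swap-LRU eligibility test from ttm_bo_add_mem_to_lru() above;
 * TTM_PAGE_FLAG_SWAPPED is filled in from memory of that era's code, since
 * the listing truncates the mask.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_tt.h>

static bool example_tt_on_swap_lru(struct ttm_buffer_object *bo)
{
	return bo->ttm &&
	       !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
					TTM_PAGE_FLAG_SWAPPED));
}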

/drivers/gpu/drm/radeon/

radeon_ttm.c
    60  struct ttm_tt *ttm,
   147  if (radeon_ttm_tt_has_userptr(rdev, bo->ttm))  in radeon_verify_access()
   235  r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);  in radeon_move_vram_ram()
   240  r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);  in radeon_move_vram_ram()
   245  r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);  in radeon_move_vram_ram()
   317  if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {  in radeon_bo_move()
   421  struct ttm_dma_tt ttm;  member
   431  static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)  in radeon_ttm_tt_pin_userptr() argument
   434  struct radeon_ttm_tt *gtt = (void *)ttm;  in radeon_ttm_tt_pin_userptr()
   448  unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;  in radeon_ttm_tt_pin_userptr()
    [all …]
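
radeon_move_vram_ram() spells out the bounce sequence for VRAM-to-system moves: pick a temporary GTT placement, match the TT caching to it, populate, then bind so the copy engine can reach the pages. The core of that sequence condensed into one helper; tmp_mem setup and error unwinding are left out, and radeon_ttm_tt_bind() is the driver hook shown above, declared in radeon's own headers:

/* Condensed sketch of the prepare step in radeon_move_vram_ram() above. */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static int example_prepare_gtt_bounce(struct ttm_buffer_object *bo,
				      struct ttm_resource *tmp_mem,
				      struct ttm_operation_ctx *ctx)
{
	int r;

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem->placement);
	if (r)
		return r;

	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
	if (r)
		return r;

	/* radeon's bind hook, as listed above. */
	return radeon_ttm_tt_bind(bo->bdev, bo->ttm, tmp_mem);
}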

radeon_mn.c
    56  if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))  in radeon_mn_invalidate()

radeon_prime.c
    39  return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);  in radeon_gem_prime_get_sg_table()
   134  if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))  in radeon_gem_prime_export()

/drivers/gpu/drm/nouveau/

nouveau_sgdma.c
    13  struct ttm_dma_tt ttm;  member
    18  nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)  in nouveau_sgdma_destroy() argument
    20  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_destroy()
    22  if (ttm) {  in nouveau_sgdma_destroy()
    23  nouveau_sgdma_unbind(bdev, ttm);  in nouveau_sgdma_destroy()
    24  ttm_tt_destroy_common(bdev, ttm);  in nouveau_sgdma_destroy()
    25  ttm_dma_tt_fini(&nvbe->ttm);  in nouveau_sgdma_destroy()
    31  nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)  in nouveau_sgdma_bind() argument
    33  struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;  in nouveau_sgdma_bind()
    41  ret = nouveau_mem_host(reg, &nvbe->ttm);  in nouveau_sgdma_bind()
    [all …]
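
nouveau_sgdma_destroy() documents the teardown order every backend follows: unbind from the GPU first, run the common TTM teardown, then release the DMA bookkeeping and the driver wrapper itself. Reassembled from the fragments above; the trailing kfree() is an assumption based on the usual backend pattern, since the listing truncates before it, and struct nouveau_sgdma_be plus the sgdma helpers come from the nouveau files in this listing:

/* Reassembled sketch of the nouveau_sgdma_destroy() teardown order. */
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>

static void example_sgdma_destroy(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (!ttm)
		return;

	nouveau_sgdma_unbind(bdev, ttm);	/* GPU mapping first */
	ttm_tt_destroy_common(bdev, ttm);	/* common TTM teardown */
	ttm_dma_tt_fini(&nvbe->ttm);		/* pages[]/dma_address[] */
	kfree(nvbe);				/* assumed final step */
}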

nouveau_ttm.c
   165  ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);  in nouveau_ttm_mmap()
   184  drm->ttm.type_host[!!kind] = typei;  in nouveau_ttm_init_host()
   190  drm->ttm.type_ncoh[!!kind] = typei;  in nouveau_ttm_init_host()
   207  ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);  in nouveau_ttm_init_vram()
   211  return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,  in nouveau_ttm_init_vram()
   219  struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
   223  ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);  in nouveau_ttm_fini_vram()
   225  ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);  in nouveau_ttm_fini_vram()
   228  ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);  in nouveau_ttm_fini_vram()
   243  return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,  in nouveau_ttm_init_gtt()
    [all …]

nouveau_bo.c
    47  static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
   218  nvbo->bo.bdev = &drm->ttm.bdev;  in nouveau_bo_alloc()
   361  mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)  in set_placement_list()
   592  struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;  in nouveau_bo_sync_for_device()
   602  for (i = 0; i < ttm_dma->ttm.num_pages; i++)  in nouveau_bo_sync_for_device()
   612  struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;  in nouveau_bo_sync_for_cpu()
   622  for (i = 0; i < ttm_dma->ttm.num_pages; i++)  in nouveau_bo_sync_for_cpu()
   632  mutex_lock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_add_io_reserve_lru()
   633  list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);  in nouveau_bo_add_io_reserve_lru()
   634  mutex_unlock(&drm->ttm.io_reserve_mutex);  in nouveau_bo_add_io_reserve_lru()
    [all …]
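
nouveau_bo_sync_for_device()/_for_cpu() show why the driver keeps a struct ttm_dma_tt around: coherency maintenance is a per-page dma_sync_single_*() loop over dma_address[]. The device-direction half, sketched with the loop body the listing elides (in nouveau the device pointer comes from drm->dev->dev):

/* Sketch of the per-page sync loop in nouveau_bo_sync_for_device() above. */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_tt.h>

static void example_sync_for_device(struct device *dev,
				    struct ttm_dma_tt *ttm_dma)
{
	unsigned long i;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(dev, ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}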

nouveau_ttm.h
     8  return container_of(bd, struct nouveau_drm, ttm.bdev);  in nouveau_bdev()
    25  int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
    26  void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
    27  void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);

nouveau_mem.c
   107  type = drm->ttm.type_ncoh[!!mem->kind];  in nouveau_mem_host()
   109  type = drm->ttm.type_host[0];  in nouveau_mem_host()
   119  if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;  in nouveau_mem_host()
   148  drm->ttm.type_vram, page, size,  in nouveau_mem_vram()
   156  drm->ttm.type_vram, page, size,  in nouveau_mem_vram()

nouveau_drv.h
   174  } ttm;  member
   245  return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);  in nouveau_drm_use_coherent_gpu_mapping()

/drivers/gpu/drm/amd/amdgpu/

amdgpu_ttm.c
    67  struct ttm_tt *ttm,
   175  if (amdgpu_ttm_tt_get_usermm(bo->ttm))  in amdgpu_verify_access()
   288  flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);  in amdgpu_ttm_map_buffer()
   298  dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);  in amdgpu_ttm_map_buffer()
   549  r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);  in amdgpu_move_vram_ram()
   554  r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);  in amdgpu_move_vram_ram()
   559  r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);  in amdgpu_move_vram_ram()
   670  if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {  in amdgpu_bo_move()
   814  struct ttm_dma_tt ttm;  member
   836  struct ttm_tt *ttm = bo->tbo.ttm;  in amdgpu_ttm_tt_get_user_pages() local
    [all …]

amdgpu_ttm.h
   152  bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
   159  static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)  in amdgpu_ttm_tt_get_user_pages_done() argument
   165  void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
   168  bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
   169  struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
   170  bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
   172  bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
   174  bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
   175  bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
   176  uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
    [all …]

amdgpu_gmc.c
    48  struct ttm_dma_tt *ttm;  in amdgpu_gmc_get_pde_for_bo() local
    52  ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);  in amdgpu_gmc_get_pde_for_bo()
    53  *addr = ttm->dma_address[0];  in amdgpu_gmc_get_pde_for_bo()
    62  *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);  in amdgpu_gmc_get_pde_for_bo()
   125  struct ttm_dma_tt *ttm;  in amdgpu_gmc_agp_addr() local
   127  if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)  in amdgpu_gmc_agp_addr()
   130  ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);  in amdgpu_gmc_agp_addr()
   131  if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)  in amdgpu_gmc_agp_addr()
   134  return adev->gmc.agp_start + ttm->dma_address[0];  in amdgpu_gmc_agp_addr()
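
amdgpu_gmc_agp_addr() is a compact example of reading a DMA address back out of a bound TT: for a single uncached page the GPU-visible address is just the AGP aperture base plus dma_address[0]. Reconstructed from the fragments above; the failure return value and the amdgpu types come from amdgpu's own headers, so treat them as assumptions:

/* Reconstructed sketch of amdgpu_gmc_agp_addr(); AMDGPU_BO_INVALID_OFFSET as
 * the failure value is assumed from surrounding amdgpu code, and
 * struct amdgpu_device is declared in amdgpu.h, not here.
 */
static uint64_t example_gmc_agp_addr(struct amdgpu_device *adev,
				     struct ttm_buffer_object *bo)
{
	struct ttm_dma_tt *ttm;

	if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
	if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + ttm->dma_address[0];
}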

amdgpu_amdkfd_gpuvm.c
   318  if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),  in amdgpu_amdkfd_bo_validate()
   569  ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);  in init_user_pages()
   587  amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);  in init_user_pages()
  1230  bo->tbo.ttm->sg = sg;  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
  1388  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {  in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
  1413  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&  in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
  1435  !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {  in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
  1473  if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)  in amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
  1552  !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)  in amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu()
  1586  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {  in amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel()
    [all …]

amdgpu_cs.c
    65  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))  in amdgpu_cs_user_fence_chunk()
   472  usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);  in amdgpu_cs_list_validate()
   476  if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&  in amdgpu_cs_list_validate()
   484  amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,  in amdgpu_cs_list_validate()
   550  e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,  in amdgpu_cs_parser_bos()
   565  for (i = 0; i < bo->tbo.ttm->num_pages; i++) {  in amdgpu_cs_parser_bos()
   566  if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {  in amdgpu_cs_parser_bos()
  1225  r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);  in amdgpu_cs_submit()
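
amdgpu_cs_parser_bos() keeps a private copy of a userptr BO's page array and re-checks it against the TT before submission; any mismatch means the MMU notifier invalidated the range and the pages must be re-acquired. The comparison loop, isolated into a standalone predicate for illustration:

/* Sketch of the userptr staleness check from amdgpu_cs_parser_bos() above. */
#include <drm/ttm/ttm_tt.h>

static bool example_user_pages_stale(struct ttm_tt *ttm,
				     struct page **cached_pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; i++)
		if (ttm->pages[i] != cached_pages[i])
			return true;	/* notifier swapped a page out */

	return false;
}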

/drivers/gpu/drm/vmwgfx/

vmwgfx_ttm_buffer.c
   441  vsgt->pages = vmw_tt->dma_ttm.ttm.pages;  in vmw_ttm_map_dma()
   442  vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;  in vmw_ttm_map_dma()
   552  container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);  in vmw_bo_sg_table()
   559  struct ttm_tt *ttm, struct ttm_resource *bo_mem)  in vmw_ttm_bind() argument
   562  container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);  in vmw_ttm_bind()
   581  ttm->num_pages, vmw_be->gmr_id);  in vmw_ttm_bind()
   586  vmw_mob_create(ttm->num_pages);  in vmw_ttm_bind()
   592  &vmw_be->vsgt, ttm->num_pages,  in vmw_ttm_bind()
   603  struct ttm_tt *ttm)  in vmw_ttm_unbind() argument
   606  container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);  in vmw_ttm_unbind()
    [all …]

vmwgfx_blit.c
   467  if (!ttm_tt_is_populated(dst->ttm)) {  in vmw_bo_cpu_blit()
   468  ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);  in vmw_bo_cpu_blit()
   473  if (!ttm_tt_is_populated(src->ttm)) {  in vmw_bo_cpu_blit()
   474  ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx);  in vmw_bo_cpu_blit()
   483  d.dst_pages = dst->ttm->pages;  in vmw_bo_cpu_blit()
   484  d.src_pages = src->ttm->pages;  in vmw_bo_cpu_blit()
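
vmw_bo_cpu_blit() makes the same point from the CPU side: before touching ttm->pages for a memcpy-style blit, both source and destination TTs must be populated through the driver's own populate hook. A generic version of that guard, with the helper name invented for the example:

/* Sketch of the populate-before-CPU-access guard used in vmw_bo_cpu_blit()
 * above, written as a generic helper.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static int example_ensure_populated(struct ttm_buffer_object *bo,
				    struct ttm_operation_ctx *ctx)
{
	if (!bo->ttm || ttm_tt_is_populated(bo->ttm))
		return 0;

	return bo->bdev->driver->ttm_tt_populate(bo->bdev, bo->ttm, ctx);
}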

/drivers/gpu/drm/qxl/

qxl_ttm.c
   102  struct ttm_tt ttm;  member
   108  struct ttm_tt *ttm,  in qxl_ttm_backend_bind() argument
   111  struct qxl_ttm_tt *gtt = (void *)ttm;  in qxl_ttm_backend_bind()
   114  if (!ttm->num_pages) {  in qxl_ttm_backend_bind()
   116  ttm->num_pages, bo_mem, ttm);  in qxl_ttm_backend_bind()
   123  struct ttm_tt *ttm)  in qxl_ttm_backend_unbind() argument
   129  struct ttm_tt *ttm)  in qxl_ttm_backend_destroy() argument
   131  struct qxl_ttm_tt *gtt = (void *)ttm;  in qxl_ttm_backend_destroy()
   133  ttm_tt_destroy_common(bdev, ttm);  in qxl_ttm_backend_destroy()
   134  ttm_tt_fini(&gtt->ttm);  in qxl_ttm_backend_destroy()
    [all …]