/drivers/staging/media/atomisp/pci/hmm/

D | hmm_bo.c |
  in __bo_init(), bo is an argument:
     58  static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
     70          memset(bo, 0, sizeof(*bo));
     71          mutex_init(&bo->mutex);
     74          INIT_LIST_HEAD(&bo->list);
     76          bo->bdev = bdev;
     77          bo->vmap_addr = NULL;
     78          bo->status = HMM_BO_FREE;
     79          bo->start = bdev->start;
     80          bo->pgnr = pgnr;
     81          bo->end = bo->start + pgnr_to_size(pgnr);
  [all …]

D | hmm.c |
  in bo_show(), bo is a local:
     58          struct hmm_buffer_object *bo;
     73          list_for_each_entry(bo, bo_list, list) {
     74                  if ((active && (bo->status & HMM_BO_ALLOCED)) ||
     75                      (!active && !(bo->status & HMM_BO_ALLOCED))) {
     78                                     hmm_bo_type_string[bo->type], bo->pgnr);
     80                          total[bo->type] += bo->pgnr;
     81                          count[bo->type]++;
  in hmm_alloc(), bo is a local:
    229          struct hmm_buffer_object *bo;
    246          bo = hmm_bo_alloc(&bo_device, pgnr);
    247          if (!bo) {
  [all …]
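Note that hmm_alloc() above works in whole pages (pgnr), not bytes. A minimal sketch of the page/byte conversion this implies, assuming pgnr_to_size() shifts by PAGE_SHIFT as hmm_bo.h suggests; the my_* helpers are illustrative names, not driver code:

    #include <linux/mm.h>   /* PAGE_SHIFT, PAGE_ALIGN */

    /* A buffer object spans pgnr whole pages (assumption from hmm_bo.h). */
    static inline size_t my_pgnr_to_size(unsigned int pgnr)
    {
            return (size_t)pgnr << PAGE_SHIFT;
    }

    /* Inverse direction: round a byte size up to full pages. */
    static inline unsigned int my_size_to_pgnr(size_t size)
    {
            return PAGE_ALIGN(size) >> PAGE_SHIFT;
    }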
/drivers/gpu/drm/ttm/

D | ttm_bo.c |
  in ttm_bo_default_destroy(), bo is an argument:
     48  static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
     50          kfree(bo);
  in ttm_bo_mem_space_debug(), bo is an argument:
     53  static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
     61                 bo, bo->resource->num_pages, bo->base.size >> 10,
     62                 bo->base.size >> 20);
     67                  man = ttm_manager_type(bo->bdev, mem_type);
  in ttm_bo_del_from_lru(), bo is an argument:
     72  static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
     74          struct ttm_device *bdev = bo->bdev;
     76          list_del_init(&bo->lru);
     79                  bdev->funcs->del_from_lru_notify(bo);
  [all …]

D | ttm_bo_util.c |
  struct member:
     47          struct ttm_buffer_object *bo;
  in ttm_move_memcpy(), bo is an argument:
     89  void ttm_move_memcpy(struct ttm_buffer_object *bo,
     96          struct ttm_tt *ttm = bo->ttm;
  in ttm_bo_move_memcpy(), bo is an argument:
    135  int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
    139          struct ttm_device *bdev = bo->bdev;
    141                  ttm_manager_type(bo->bdev, dst_mem->mem_type);
    142          struct ttm_tt *ttm = bo->ttm;
    143          struct ttm_resource *src_mem = bo->resource;
    162                  dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
    168                  src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
  [all …]

D | ttm_bo_vm.c |
  in ttm_bo_vm_fault_idle(), bo is an argument:
     46  static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
     52          if (likely(!bo->moving))
     58          if (dma_fence_is_signaled(bo->moving))
     71                          ttm_bo_get(bo);
     73                          (void) dma_fence_wait(bo->moving, true);
     74                          dma_resv_unlock(bo->base.resv);
     75                          ttm_bo_put(bo);
     82          err = dma_fence_wait(bo->moving, true);
     90          dma_fence_put(bo->moving);
     91          bo->moving = NULL;
  [all …]
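All three TTM files above orbit one locking discipline: a BO must be reserved (its dma_resv lock held) before its placement is touched, and ttm_bo_validate() migrates the backing store. A minimal sketch of that pattern, assuming a caller-supplied placement; my_move_bo() is an illustrative name, while the TTM calls are the API of this kernel generation (~v5.14):

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_placement.h>

    static int my_move_bo(struct ttm_buffer_object *bo,
                          struct ttm_placement *placement)
    {
            struct ttm_operation_ctx ctx = {
                    .interruptible = true,
                    .no_wait_gpu = false,
            };
            int ret;

            /* Take the BO's dma_resv lock; interruptible, no ww ticket. */
            ret = ttm_bo_reserve(bo, true, false, NULL);
            if (ret)
                    return ret;

            /* (Re)place the backing storage according to 'placement'. */
            ret = ttm_bo_validate(bo, placement, &ctx);

            ttm_bo_unreserve(bo);
            return ret;
    }

The qxl, radeon, amdgpu, and vmwgfx wrappers in the sections below are thin layers over exactly these calls.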
/drivers/gpu/drm/qxl/

D | qxl_object.c |
  forward declarations:
     32  static int __qxl_bo_pin(struct qxl_bo *bo);
     33  static void __qxl_bo_unpin(struct qxl_bo *bo);
  in qxl_ttm_bo_destroy(), bo is a local:
     37          struct qxl_bo *bo;
     40          bo = to_qxl_bo(tbo);
     41          qdev = to_qxl(bo->tbo.base.dev);
     43          qxl_surface_evict(qdev, bo, false);
     44          WARN_ON_ONCE(bo->map_count > 0);
     46          list_del_init(&bo->list);
     48          drm_gem_object_release(&bo->tbo.base);
     49          kfree(bo);
  [all …]

D | qxl_release.c |
  in qxl_release_free_list(), bo is a local:
    124          struct qxl_bo *bo;
    128                  bo = to_qxl_bo(entry->tv.bo);
    129                  qxl_bo_unref(&bo);
  in qxl_release_bo_alloc(), bo is an argument:
    163                                 struct qxl_bo **bo,
    168                              QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
  in qxl_release_list_add(), bo is an argument:
    171  int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
    176                  if (entry->tv.bo == &bo->tbo)
    184          qxl_bo_ref(bo);
    185          entry->tv.bo = &bo->tbo;
  in qxl_release_validate_bo(), bo is an argument:
    191  static int qxl_release_validate_bo(struct qxl_bo *bo)
  [all …]

D | qxl_object.h |
  in qxl_bo_reserve(), bo is an argument:
     30  static inline int qxl_bo_reserve(struct qxl_bo *bo)
     34          r = ttm_bo_reserve(&bo->tbo, true, false, NULL);
     37                  struct drm_device *ddev = bo->tbo.base.dev;
     39                  dev_err(ddev->dev, "%p reserve failed\n", bo);
  in qxl_bo_unreserve(), bo is an argument:
     46  static inline void qxl_bo_unreserve(struct qxl_bo *bo)
     48          ttm_bo_unreserve(&bo->tbo);
  in qxl_bo_size(), bo is an argument:
     51  static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
     53          return bo->tbo.base.size;
  prototypes:
     62  int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map);
     63  int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map);
  [all …]
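A usage sketch for the qxl_object.h inlines above, combining reserve, size query, and unreserve; my_query_bo_size() is an illustrative caller, not part of the driver:

    #include "qxl_object.h"

    static int my_query_bo_size(struct qxl_bo *bo, unsigned long *size)
    {
            int r;

            /* qxl_bo_reserve() wraps ttm_bo_reserve() and logs failures. */
            r = qxl_bo_reserve(bo);
            if (r)
                    return r;

            *size = qxl_bo_size(bo);        /* bo->tbo.base.size */

            qxl_bo_unreserve(bo);
            return 0;
    }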
/drivers/gpu/drm/radeon/

D | radeon_object.c |
  forward declaration:
     45  static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
  in radeon_update_memory_usage(), bo is an argument:
     52  static void radeon_update_memory_usage(struct ttm_buffer_object *bo,
     55          struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
     60                          atomic64_add(bo->base.size, &rdev->gtt_usage);
     62                          atomic64_sub(bo->base.size, &rdev->gtt_usage);
     66                          atomic64_add(bo->base.size, &rdev->vram_usage);
     68                          atomic64_sub(bo->base.size, &rdev->vram_usage);
  in radeon_ttm_bo_destroy(), bo is a local:
     75          struct radeon_bo *bo;
     77          bo = container_of(tbo, struct radeon_bo, tbo);
     79          mutex_lock(&bo->rdev->gem.mutex);
  [all …]

D | radeon_prime.c |
  in radeon_gem_prime_get_sg_table(), bo is a local:
     37          struct radeon_bo *bo = gem_to_radeon_bo(obj);
     39          return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
     40                                       bo->tbo.ttm->num_pages);
  in radeon_gem_prime_import_sg_table(), bo is a local:
     49          struct radeon_bo *bo;
     54                                 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
     59          bo->tbo.base.funcs = &radeon_gem_object_funcs;
     62          list_add_tail(&bo->list, &rdev->gem.objects);
     65          bo->prime_shared_count = 1;
     66          return &bo->tbo.base;
  in radeon_gem_prime_pin(), bo is a local:
     71          struct radeon_bo *bo = gem_to_radeon_bo(obj);
  [all …]

D | radeon_object.h |
  in radeon_bo_reserve(), bo is an argument:
     64  static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
     68          r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
     71                  dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
  in radeon_bo_unreserve(), bo is an argument:
     77  static inline void radeon_bo_unreserve(struct radeon_bo *bo)
     79          ttm_bo_unreserve(&bo->tbo);
  in radeon_bo_gpu_offset(), bo is an argument:
     91  static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
     96          rdev = radeon_get_rdev(bo->tbo.bdev);
     98          switch (bo->tbo.resource->mem_type) {
    107          return (bo->tbo.resource->start << PAGE_SHIFT) + start;
  in radeon_bo_size(), bo is an argument:
    110  static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
  [all …]

D | radeon_mn.c |
  in radeon_mn_invalidate(), bo is a local:
     53          struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
     57          if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))
     63          r = radeon_bo_reserve(bo, true);
     69          r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
     74          radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
     75          r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
     79          radeon_bo_unreserve(bo);
  in radeon_mn_register(), bo is an argument:
     96  int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
    100          ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
    101                                             radeon_bo_size(bo), &radeon_mn_ops);
  [all …]
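The hits for radeon_bo_gpu_offset() skip the switch body between lines 98 and 107. A hedged reconstruction of the idea: the GPU address is the BO's page offset within its placement plus the base of whichever aperture backs it. The case labels and mc fields below are assumptions drawn from the radeon headers, not shown in the hits:

    /* GPU address = BO offset inside its domain + that domain's base. */
    static inline u64 my_radeon_bo_gpu_offset(struct radeon_bo *bo)
    {
            struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);
            u64 start = 0;

            switch (bo->tbo.resource->mem_type) {
            case TTM_PL_VRAM:
                    start = rdev->mc.vram_start;    /* assumed field */
                    break;
            case TTM_PL_TT:
                    start = rdev->mc.gtt_start;     /* assumed field */
                    break;
            }

            return (bo->tbo.resource->start << PAGE_SHIFT) + start;
    }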
/drivers/gpu/drm/tegra/

D | gem.c |
  in tegra_bo_put(), bo is an argument:
     23  static void tegra_bo_put(struct host1x_bo *bo)
     25          struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  in tegra_bo_pin(), bo is an argument:
     53  static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
     56          struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  in tegra_bo_mmap(), bo is an argument:
    132  static void *tegra_bo_mmap(struct host1x_bo *bo)
    134          struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  in tegra_bo_munmap(), bo is an argument:
    149  static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
    151          struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  in tegra_bo_get(), bo is an argument:
    162  static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
    164          struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  [all …]
/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_object.c |
  in amdgpu_bo_destroy(), bo is a local:
     57          struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
     59          amdgpu_bo_kunmap(bo);
     61          if (bo->tbo.base.import_attach)
     62                  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
     63          drm_gem_object_release(&bo->tbo.base);
     64          amdgpu_bo_unref(&bo->parent);
     65          kvfree(bo);
  in amdgpu_bo_user_destroy(), bo is a local:
     70          struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
     73          ubo = to_amdgpu_bo_user(bo);
  in amdgpu_bo_vm_destroy(), bo is a local:
     81          struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
  [all …]

D | amdgpu_object.h |
  macros:
     46  #define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
     47  #define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
  struct members:
     59          void (*destroy)(struct ttm_buffer_object *bo);
    115          struct amdgpu_bo bo;
    124          struct amdgpu_bo bo;
  in amdgpu_bo_reserve(), bo is an argument:
    171  static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
    173          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    176          r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
    179                  dev_err(adev->dev, "%p reserve failed\n", bo);
  in amdgpu_bo_unreserve(), bo is an argument:
    185  static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
  [all …]

D | amdgpu_dma_buf.c |
  in amdgpu_dma_buf_attach(), bo is a local:
     57          struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
     58          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
  in amdgpu_dma_buf_detach(), bo is a local:
     90          struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
     91          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
  in amdgpu_dma_buf_pin(), bo is a local:
    107          struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
    111          r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
    115          if (bo->tbo.moving) {
    116                  r = dma_fence_wait(bo->tbo.moving, true);
    118                          amdgpu_bo_unpin(bo);
  in amdgpu_dma_buf_unpin(), bo is a local:
    135          struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
  [all …]

D | amdgpu_amdkfd_gpuvm.c |
  in amdgpu_amdkfd_unreserve_memory_limit(), bo is an argument:
    210  void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
    212          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    213          u32 domain = bo->preferred_domains;
    214          bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
    216          if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
    221          unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
  in amdgpu_amdkfd_remove_eviction_fence(), bo is an argument:
    234  static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
    237          struct dma_resv *resv = bo->tbo.base.resv;
  in amdgpu_amdkfd_remove_fence_on_pt_pd_bos(), bo is an argument:
    287  int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
    289          struct amdgpu_bo *root = bo;
  [all …]
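amdgpu_dma_buf_pin() above captures a pattern worth calling out: pin the BO into GTT so an importer can map it, then wait out any in-flight TTM move before the pages are trusted. A minimal sketch; my_pin_for_export() is an illustrative name, the calls are the ones visible in the hits:

    static int my_pin_for_export(struct amdgpu_bo *bo)
    {
            int r;

            r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
            if (r)
                    return r;

            /* If TTM is still migrating the BO, wait before exposing it. */
            if (bo->tbo.moving) {
                    r = dma_fence_wait(bo->tbo.moving, true);
                    if (r) {
                            amdgpu_bo_unpin(bo);
                            return r;
                    }
            }

            return 0;
    }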
/drivers/gpu/drm/vc4/

D | vc4_bo.c |
  in vc4_bo_set_label(), bo is a local:
    132          struct vc4_bo *bo = to_vc4_bo(gem_obj);
    142          vc4->bo_labels[bo->label].num_allocated--;
    143          vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;
    145          if (vc4->bo_labels[bo->label].num_allocated == 0 &&
    146              is_user_label(bo->label)) {
    152                  kfree(vc4->bo_labels[bo->label].name);
    153                  vc4->bo_labels[bo->label].name = NULL;
    156          bo->label = label;
  in vc4_bo_destroy(), bo is an argument:
    164  static void vc4_bo_destroy(struct vc4_bo *bo)
    166          struct drm_gem_object *obj = &bo->base.base;
  [all …]
/drivers/gpu/drm/lima/

D | lima_gem.c |
  in lima_heap_alloc(), bo is an argument:
     20  int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
     23          struct address_space *mapping = bo->base.base.filp->f_mapping;
     24          struct device *dev = bo->base.base.dev->dev;
     25          size_t old_size = bo->heap_size;
     26          size_t new_size = bo->heap_size ? bo->heap_size * 2 :
     31          if (bo->heap_size >= bo->base.base.size)
     34          new_size = min(new_size, bo->base.base.size);
     36          mutex_lock(&bo->base.pages_lock);
     38          if (bo->base.pages) {
     39                  pages = bo->base.pages;
  [all …]
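The hit at line 26 is truncated before the initial heap size, but lines 25-34 show the growth policy: double the heap on each step, capped at the BO's full size. A sketch of just that policy; LIMA_MY_HEAP_INIT stands in for the elided initial value and is purely an assumption:

    #include <linux/minmax.h>
    #include <linux/sizes.h>

    #define LIMA_MY_HEAP_INIT SZ_4K         /* placeholder for the elided value */

    static size_t my_next_heap_size(size_t heap_size, size_t bo_size)
    {
            size_t new_size = heap_size ? heap_size * 2 : LIMA_MY_HEAP_INIT;

            return min(new_size, bo_size);  /* never outgrow the BO itself */
    }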
/drivers/gpu/drm/vmwgfx/

D | vmwgfx_bo.c |
  in vmw_buffer_object(), bo is an argument:
     56  vmw_buffer_object(struct ttm_buffer_object *bo)
     58          return container_of(bo, struct vmw_buffer_object, base);
  in vmw_user_buffer_object(), bo is an argument:
     71  vmw_user_buffer_object(struct ttm_buffer_object *bo)
     73          struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
  in vmw_bo_pin_in_placement(), bo is a local:
     95          struct ttm_buffer_object *bo = &buf->base;
    101          ret = ttm_bo_reserve(bo, interruptible, false, NULL);
    106          ret = ttm_bo_mem_compat(placement, bo->resource,
    109                  ret = ttm_bo_validate(bo, placement, &ctx);
    114          ttm_bo_unreserve(bo);
  in vmw_bo_pin_in_vram_or_gmr(), bo is a local:
    137          struct ttm_buffer_object *bo = &buf->base;
  [all …]
/drivers/gpu/drm/panfrost/

D | panfrost_gem.c |
  in panfrost_gem_free_object(), bo is a local:
     19          struct panfrost_gem_object *bo = to_panfrost_bo(obj);
     29          list_del_init(&bo->base.madv_list);
     36          WARN_ON_ONCE(!list_empty(&bo->mappings.list));
     38          if (bo->sgts) {
     40                  int n_sgt = bo->base.base.size / SZ_2M;
     43                          if (bo->sgts[i].sgl) {
     44                                  dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
     46                                  sg_free_table(&bo->sgts[i]);
     49                  kvfree(bo->sgts);
  in panfrost_gem_mapping_get(), bo is an argument:
     56  panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
  [all …]
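panfrost_gem_free_object() above keeps one sg_table per 2 MiB heap chunk (n_sgt at line 40), and the loop header is elided between lines 40 and 43. A hedged reconstruction of that teardown as a standalone helper; my_free_heap_sgts() is an illustrative name and the DMA direction is an assumption:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static void my_free_heap_sgts(struct device *dev, struct sg_table *sgts,
                                  int n_sgt)
    {
            int i;

            for (i = 0; i < n_sgt; i++) {
                    if (sgts[i].sgl) {
                            /* Unmap the chunk from the device, then free it. */
                            dma_unmap_sgtable(dev, &sgts[i],
                                              DMA_BIDIRECTIONAL, 0);
                            sg_free_table(&sgts[i]);
                    }
            }
    }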
/drivers/gpu/drm/v3d/

D | v3d_bo.c |
  in v3d_free_object(), bo is a local:
     34          struct v3d_bo *bo = to_v3d_bo(obj);
     36          v3d_mmu_remove_ptes(bo);
     44          drm_mm_remove_node(&bo->node);
     48          bo->base.pages_mark_dirty_on_put = true;
  in v3d_create_object(), bo is a local:
     69          struct v3d_bo *bo;
     75          bo = kzalloc(sizeof(*bo), GFP_KERNEL);
     76          if (!bo)
     78          obj = &bo->base.base;
     81          bo->base.map_wc = true;
     82          INIT_LIST_HEAD(&bo->unref_head);
  [all …]
/drivers/gpu/drm/virtio/

D | virtgpu_object.c |
  in virtio_gpu_cleanup_object(), bo is an argument:
     64  void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
     66          struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
     68          virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
     69          if (virtio_gpu_is_shmem(bo)) {
     70                  struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
     82                          drm_gem_shmem_unpin(&bo->base.base);
     85                  drm_gem_shmem_free_object(&bo->base.base);
     86          } else if (virtio_gpu_is_vram(bo)) {
     87                  struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
  in virtio_gpu_free_object(), bo is a local:
    103          struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
  [all …]
/drivers/gpu/drm/i915/gem/

D | i915_gem_ttm.c |
  in i915_ttm_tt_create(), bo is an argument:
    170  static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
    174                  ttm_manager_type(bo->bdev, bo->resource->mem_type);
    175          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
    187          ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
  in i915_ttm_eviction_valuable(), bo is an argument:
    222  static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
    225          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
  in i915_ttm_evict_flags(), bo is an argument:
    231  static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
  in i915_ttm_move_notify(), bo is an argument:
    237  static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
    239          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
  in i915_ttm_adjust_domains_after_move(), bo is a local:
    274          struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
  [all …]
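i915_ttm_tt_create() above is an instance of the ttm_device_funcs .ttm_tt_create hook. A minimal backend in the same shape, without the i915-specific placement logic; my_tt_create() and the ttm_cached choice are illustrative, the signatures match this kernel generation:

    #include <linux/slab.h>
    #include <drm/ttm/ttm_tt.h>

    static struct ttm_tt *my_tt_create(struct ttm_buffer_object *bo,
                                       uint32_t page_flags)
    {
            struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

            if (!tt)
                    return NULL;

            /* ttm_tt_init() sizes the page array from the BO itself. */
            if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
                    kfree(tt);
                    return NULL;
            }

            return tt;
    }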
/drivers/gpu/drm/nouveau/

D | nouveau_bo.c |
  in nouveau_bo_del_ttm(), bo is an argument:
    136  nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
    138          struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
    140          struct nouveau_bo *nvbo = nouveau_bo(bo);
    142          WARN_ON(nvbo->bo.pin_count > 0);
    143          nouveau_bo_del_io_reserve_lru(bo);
    150          if (bo->base.dev)
    151                  drm_gem_object_release(&bo->base);
    153          dma_resv_fini(&bo->base._resv);
  in nouveau_bo_fixup_align():
    169          struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
  in nouveau_bo_alloc():
    220          nvbo->bo.bdev = &drm->ttm.bdev;
  [all …]