/drivers/gpu/drm/

D | drm_buddy.c |
      14   static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,   in drm_block_alloc() argument
      35   static void drm_block_free(struct drm_buddy *mm,   in drm_block_free() argument
      41   static void list_insert_sorted(struct drm_buddy *mm,   in list_insert_sorted() argument
      47   head = &mm->free_list[drm_buddy_block_order(block)];   in list_insert_sorted()
      68   static void mark_free(struct drm_buddy *mm,   in mark_free() argument
      74   list_insert_sorted(mm, block);   in mark_free()
      97   int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)   in drm_buddy_init() argument
      113  mm->size = size;   in drm_buddy_init()
      114  mm->avail = size;   in drm_buddy_init()
      115  mm->chunk_size = chunk_size;   in drm_buddy_init()
      [all …]
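
The drm_buddy.c hits above cover the allocator's setup path: drm_buddy_init() records size, avail and chunk_size and seeds the per-order free lists. Below is a minimal, hedged sketch of how a driver typically drives this allocator, inferred from the signatures visible here and in the i915_ttm_buddy_manager.c entry further down; the exact argument lists (notably the flags and drm_buddy_free_list()) vary between kernel versions, so check include/drm/drm_buddy.h in your tree.

/* Hedged sketch, not taken from any driver above: init, allocate 8 MiB
 * out of a 1 GiB range in 4 KiB chunks, free the blocks, tear down. */
#include <drm/drm_buddy.h>
#include <linux/list.h>
#include <linux/sizes.h>

static int example_buddy_use(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, SZ_1G, SZ_4K);	/* size, chunk_size */
	if (err)
		return err;

	/* Allocate anywhere in [0, SZ_1G); resulting blocks land on the list. */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_8M, SZ_4K,
				     &blocks, 0 /* flags */);
	if (!err)
		drm_buddy_free_list(&mm, &blocks);	/* give the blocks back */

	drm_buddy_fini(&mm);
	return err;
}
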

D | drm_mm.c |
      118  static void show_leaks(struct drm_mm *mm)   in show_leaks() argument
      127  list_for_each_entry(node, drm_mm_nodes(mm), node_list) {   in show_leaks()
      146  static void show_leaks(struct drm_mm *mm) { }   in show_leaks() argument
      157  __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)   in INTERVAL_TREE_DEFINE()
      159  return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,   in INTERVAL_TREE_DEFINE()
      160  start, last) ?: (struct drm_mm_node *)&mm->head_node;   in INTERVAL_TREE_DEFINE()
      167  struct drm_mm *mm = hole_node->mm;   in drm_mm_interval_tree_add_node() local
      190  link = &mm->interval_tree.rb_root.rb_node;   in drm_mm_interval_tree_add_node()
      208  rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,   in drm_mm_interval_tree_add_node()
      268  struct drm_mm *mm = node->mm;   in add_hole() local
      [all …]
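
drm_mm.c is the older DRM range manager; the hits above are its internal interval-tree and hole bookkeeping (show_leaks(), drm_mm_interval_tree_add_node(), add_hole()). For orientation, a hedged sketch of the public side of that API follows; none of these calls appear in the listing itself, so treat the prototypes as assumptions to be verified against include/drm/drm_mm.h.

/* Hedged sketch of basic drm_mm usage (prototypes assumed from
 * include/drm/drm_mm.h, not from the search results above). */
#include <drm/drm_mm.h>
#include <linux/sizes.h>

static int example_drm_mm_use(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};	/* node must start out zeroed */
	int err;

	drm_mm_init(&mm, 0, SZ_64M);			/* manage [0, 64 MiB) */

	err = drm_mm_insert_node(&mm, &node, SZ_4K);	/* first-fit 4 KiB */
	if (!err)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);				/* all nodes must be gone */
	return err;
}
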

/drivers/gpu/drm/tests/

D | drm_buddy_test.c |
      46   static void __dump_block(struct kunit *test, struct drm_buddy *mm,   in __dump_block() argument
      52   drm_buddy_block_size(mm, block), !block->parent, buddy);   in __dump_block()
      55   static void dump_block(struct kunit *test, struct drm_buddy *mm,   in dump_block() argument
      60   __dump_block(test, mm, block, false);   in dump_block()
      64   __dump_block(test, mm, buddy, true);   in dump_block()
      67   static int check_block(struct kunit *test, struct drm_buddy *mm,   in check_block() argument
      84   block_size = drm_buddy_block_size(mm, block);   in check_block()
      87   if (block_size < mm->chunk_size) {   in check_block()
      98   if (!IS_ALIGNED(block_size, mm->chunk_size)) {   in check_block()
      103  if (!IS_ALIGNED(offset, mm->chunk_size)) {   in check_block()
      [all …]

D | drm_mm_test.c |
      46   static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)   in assert_no_holes() argument
      53   drm_mm_for_each_hole(hole, mm, hole_start, hole_end)   in assert_no_holes()
      61   drm_mm_for_each_node(hole, mm) {   in assert_no_holes()
      71   static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)   in assert_one_hole() argument
      82   drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {   in assert_one_hole()
      100  static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)   in assert_continuous() argument
      106  if (!assert_no_holes(test, mm))   in assert_continuous()
      111  drm_mm_for_each_node(node, mm) {   in assert_continuous()
      130  drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {   in assert_continuous()
      162  static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,   in assert_node() argument
      [all …]

/drivers/gpu/drm/i915/gem/

D | i915_gem_shrinker.c |
      36   return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;   in can_release_pages()
      112  { &i915->mm.purge_list, ~0u },   in i915_gem_shrink()
      114  &i915->mm.shrink_list,   in i915_gem_shrink()
      190  spin_lock_irqsave(&i915->mm.obj_lock, flags);   in i915_gem_shrink()
      194  mm.link))) {   in i915_gem_shrink()
      195  list_move_tail(&obj->mm.link, &still_in_list);   in i915_gem_shrink()
      198  !is_vmalloc_addr(obj->mm.mapping))   in i915_gem_shrink()
      211  spin_unlock_irqrestore(&i915->mm.obj_lock, flags);   in i915_gem_shrink()
      235  spin_lock_irqsave(&i915->mm.obj_lock, flags);   in i915_gem_shrink()
      240  spin_unlock_irqrestore(&i915->mm.obj_lock, flags);   in i915_gem_shrink()
      [all …]

D | i915_gem_pages.c |
      29   obj->mm.madv = I915_MADV_DONTNEED;   in __i915_gem_object_set_pages()
      40   obj->mm.get_page.sg_pos = pages->sgl;   in __i915_gem_object_set_pages()
      41   obj->mm.get_page.sg_idx = 0;   in __i915_gem_object_set_pages()
      42   obj->mm.get_dma_page.sg_pos = pages->sgl;   in __i915_gem_object_set_pages()
      43   obj->mm.get_dma_page.sg_idx = 0;   in __i915_gem_object_set_pages()
      45   obj->mm.pages = pages;   in __i915_gem_object_set_pages()
      47   obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);   in __i915_gem_object_set_pages()
      48   GEM_BUG_ON(!obj->mm.page_sizes.phys);   in __i915_gem_object_set_pages()
      58   obj->mm.page_sizes.sg = 0;   in __i915_gem_object_set_pages()
      60   if (obj->mm.page_sizes.phys & ~0u << i)   in __i915_gem_object_set_pages()
      [all …]

D | i915_gem_object.c |
      106  INIT_LIST_HEAD(&obj->mm.link);   in i915_gem_object_init()
      120  obj->mm.madv = I915_MADV_WILLNEED;   in i915_gem_object_init()
      121  INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);   in i915_gem_object_init()
      122  mutex_init(&obj->mm.get_page.lock);   in i915_gem_object_init()
      123  INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);   in i915_gem_object_init()
      124  mutex_init(&obj->mm.get_dma_page.lock);   in i915_gem_object_init()
      138  mutex_destroy(&obj->mm.get_page.lock);   in __i915_gem_object_fini()
      139  mutex_destroy(&obj->mm.get_dma_page.lock);   in __i915_gem_object_fini()
      297  GEM_BUG_ON(!atomic_read(&i915->mm.free_count));   in __i915_gem_free_object_rcu()
      298  atomic_dec(&i915->mm.free_count);   in __i915_gem_free_object_rcu()
      [all …]

/drivers/net/ethernet/mscc/

D | ocelot_mm.c |
      55   struct ocelot_mm_state *mm = &ocelot->mm[port];   in ocelot_port_update_active_preemptible_tcs() local
      66   ocelot_port->speed == SPEED_1000) && mm->tx_active)   in ocelot_port_update_active_preemptible_tcs()
      67   val = mm->preemptible_tcs;   in ocelot_port_update_active_preemptible_tcs()
      75   mm->active_preemptible_tcs = val;   in ocelot_port_update_active_preemptible_tcs()
      82   mm->tx_active ? "active" : "inactive", mm->preemptible_tcs,   in ocelot_port_update_active_preemptible_tcs()
      83   mm->active_preemptible_tcs);   in ocelot_port_update_active_preemptible_tcs()
      93   struct ocelot_mm_state *mm = &ocelot->mm[port];   in ocelot_port_change_fp() local
      97   if (mm->preemptible_tcs == preemptible_tcs)   in ocelot_port_change_fp()
      100  mm->preemptible_tcs = preemptible_tcs;   in ocelot_port_change_fp()
      108  struct ocelot_mm_state *mm = &ocelot->mm[port];   in ocelot_mm_update_port_status() local
      [all …]

/drivers/gpu/drm/nouveau/nvkm/core/

D | mm.c |
      26   #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
      30   nvkm_mm_dump(struct nvkm_mm *mm, const char *header)   in nvkm_mm_dump() argument
      36   list_for_each_entry(node, &mm->nodes, nl_entry) {   in nvkm_mm_dump()
      41   list_for_each_entry(node, &mm->free, fl_entry) {   in nvkm_mm_dump()
      48   nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)   in nvkm_mm_free() argument
      72   list_for_each_entry(prev, &mm->free, fl_entry) {   in nvkm_mm_free()
      86   region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)   in region_head() argument
      111  nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,   in nvkm_mm_head() argument
      121  list_for_each_entry(this, &mm->free, fl_entry) {   in nvkm_mm_head()
      131  s = roundup(s, mm->block_size);   in nvkm_mm_head()
      [all …]

/drivers/misc/cxl/

D | fault.c |
      84   static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,   in cxl_fault_segment() argument
      90   if (!(rc = copro_calculate_slb(mm, ea, &slb))) {   in cxl_fault_segment()
      113  struct mm_struct *mm, u64 ea)   in cxl_handle_segment_miss() argument
      120  if ((rc = cxl_fault_segment(ctx, mm, ea)))   in cxl_handle_segment_miss()
      131  int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)   in cxl_handle_mm_fault() argument
      144  if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {   in cxl_handle_mm_fault()
      145  cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));   in cxl_handle_mm_fault()
      153  if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {   in cxl_handle_mm_fault()
      167  if (!mm && (get_region_id(dar) != USER_REGION_ID))   in cxl_handle_mm_fault()
      174  hash_page_mm(mm, dar, access, 0x300, inv_flags);   in cxl_handle_mm_fault()
      [all …]

D | cxllib.c |
      184  struct mm_struct *mm = get_task_mm(task);   in cxllib_get_PE_attributes() local
      185  if (mm == NULL)   in cxllib_get_PE_attributes()
      191  attr->pid = mm->context.id;   in cxllib_get_PE_attributes()
      192  mmput(mm);   in cxllib_get_PE_attributes()
      202  static int get_vma_info(struct mm_struct *mm, u64 addr,   in get_vma_info() argument
      209  mmap_read_lock(mm);   in get_vma_info()
      211  vma = find_vma(mm, addr);   in get_vma_info()
      220  mmap_read_unlock(mm);   in get_vma_info()
      224  int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)   in cxllib_handle_fault() argument
      230  if (mm == NULL)   in cxllib_handle_fault()
      [all …]
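
The cxllib.c hits in get_vma_info() show the canonical pattern for inspecting another task's address space: pin the mm with get_task_mm(), hold mmap_read_lock() around find_vma(), then release both. A stand-alone, hedged restatement of that pattern follows; example_vma_size() is a hypothetical helper written for illustration, not part of the cxl driver.

/* Hedged sketch of the mm/VMA lookup pattern visible in get_vma_info();
 * example_vma_size() is hypothetical. */
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int example_vma_size(struct task_struct *task, unsigned long addr,
			    unsigned long *size)
{
	struct mm_struct *mm = get_task_mm(task);	/* may return NULL */
	struct vm_area_struct *vma;
	int rc = 0;

	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	/* find_vma() returns the first VMA with vm_end > addr; make sure
	 * addr actually falls inside it. */
	if (vma && vma->vm_start <= addr)
		*size = vma->vm_end - vma->vm_start;
	else
		rc = -EFAULT;
	mmap_read_unlock(mm);

	mmput(mm);	/* balance get_task_mm() */
	return rc;
}
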

/drivers/gpu/drm/amd/amdkfd/

D | kfd_mqd_manager.c |
      91   void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,   in free_mqd_hiq_sdma() argument
      98   void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,   in mqd_symmetrically_map_cu_mask() argument
      104  bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);   in mqd_symmetrically_map_cu_mask()
      108  int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);   in mqd_symmetrically_map_cu_mask()
      109  int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;   in mqd_symmetrically_map_cu_mask()
      111  amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);   in mqd_symmetrically_map_cu_mask()
      113  cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;   in mqd_symmetrically_map_cu_mask()
      131  cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&   in mqd_symmetrically_map_cu_mask()
      132  KFD_GC_VERSION(mm->dev) < IP_VERSION(12, 0, 0)) ? 2 : 1;   in mqd_symmetrically_map_cu_mask()
      206  int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,   in kfd_hiq_load_mqd_kiq() argument
      [all …]

D | kfd_mqd_manager_v9.c |
      37   static void update_mqd(struct mqd_manager *mm, void *mqd,
      41   static uint64_t mqd_stride_v9(struct mqd_manager *mm,   in mqd_stride_v9() argument
      44   if (mm->dev->kfd->cwsr_enabled &&   in mqd_stride_v9()
      49   return mm->mqd_size;   in mqd_stride_v9()
      62   static void update_cu_mask(struct mqd_manager *mm, void *mqd,   in update_cu_mask() argument
      71   mqd_symmetrically_map_cu_mask(mm,   in update_cu_mask()
      80   if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {   in update_cu_mask()
      158  static void init_mqd(struct mqd_manager *mm, void **mqd,   in init_mqd() argument
      207  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {   in init_mqd()
      223  update_mqd(mm, m, q, NULL);   in init_mqd()
      [all …]

D | kfd_mqd_manager.h |
      74   void (*init_mqd)(struct mqd_manager *mm, void **mqd,
      78   int (*load_mqd)(struct mqd_manager *mm, void *mqd,
      83   void (*update_mqd)(struct mqd_manager *mm, void *mqd,
      87   int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
      92   void (*free_mqd)(struct mqd_manager *mm, void *mqd,
      95   bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
      99   int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
      105  void (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd, uint32_t *ctl_stack_size);
      107  void (*checkpoint_mqd)(struct mqd_manager *mm,
      112  void (*restore_mqd)(struct mqd_manager *mm, void **mqd,
      [all …]

D | kfd_mqd_manager_vi.c |
      48   static void update_cu_mask(struct mqd_manager *mm, void *mqd,   in update_cu_mask() argument
      57   mqd_symmetrically_map_cu_mask(mm,   in update_cu_mask()
      91   static void init_mqd(struct mqd_manager *mm, void **mqd,   in init_mqd() argument
      138  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {   in init_mqd()
      154  mm->update_mqd(mm, m, q, NULL);   in init_mqd()
      157  static int load_mqd(struct mqd_manager *mm, void *mqd,   in load_mqd() argument
      165  return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,   in load_mqd()
      170  static void __update_mqd(struct mqd_manager *mm, void *mqd,   in __update_mqd() argument
      229  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)   in __update_mqd()
      234  update_cu_mask(mm, mqd, minfo);   in __update_mqd()
      [all …]

D | kfd_mqd_manager_cik.c |
      45   static void update_cu_mask(struct mqd_manager *mm, void *mqd,   in update_cu_mask() argument
      54   mqd_symmetrically_map_cu_mask(mm,   in update_cu_mask()
      88   static void init_mqd(struct mqd_manager *mm, void **mqd,   in init_mqd() argument
      139  mm->update_mqd(mm, m, q, NULL);   in init_mqd()
      142  static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,   in init_mqd_sdma() argument
      156  mm->update_mqd(mm, m, q, NULL);   in init_mqd_sdma()
      159  static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,   in load_mqd() argument
      167  return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,   in load_mqd()
      172  static void __update_mqd(struct mqd_manager *mm, void *mqd,   in __update_mqd() argument
      203  update_cu_mask(mm, mqd, minfo);   in __update_mqd()
      [all …]

D | kfd_mqd_manager_v10.c |
      45   static void update_cu_mask(struct mqd_manager *mm, void *mqd,   in update_cu_mask() argument
      54   mqd_symmetrically_map_cu_mask(mm,   in update_cu_mask()
      88   static void init_mqd(struct mqd_manager *mm, void **mqd,   in init_mqd() argument
      129  if (mm->dev->kfd->cwsr_enabled) {   in init_mqd()
      145  mm->update_mqd(mm, m, q, NULL);   in init_mqd()
      148  static int load_mqd(struct mqd_manager *mm, void *mqd,   in load_mqd() argument
      156  r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,   in load_mqd()
      162  static void update_mqd(struct mqd_manager *mm, void *mqd,   in update_mqd() argument
      218  if (mm->dev->kfd->cwsr_enabled)   in update_mqd()
      221  update_cu_mask(mm, mqd, minfo);   in update_mqd()
      [all …]

D | kfd_mqd_manager_v11.c |
      44   static void update_cu_mask(struct mqd_manager *mm, void *mqd,   in update_cu_mask() argument
      73   mqd_symmetrically_map_cu_mask(mm,   in update_cu_mask()
      123  static void init_mqd(struct mqd_manager *mm, void **mqd,   in init_mqd() argument
      135  if (mm->dev->kfd->shared_resources.enable_mes)   in init_mqd()
      175  if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))   in init_mqd()
      183  if (mm->dev->kfd->cwsr_enabled) {   in init_mqd()
      199  mm->update_mqd(mm, m, q, NULL);   in init_mqd()
      202  static int load_mqd(struct mqd_manager *mm, void *mqd,   in load_mqd() argument
      210  r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,   in load_mqd()
      216  static void update_mqd(struct mqd_manager *mm, void *mqd,   in update_mqd() argument
      [all …]

/drivers/iommu/arm/arm-smmu-v3/

D | arm-smmu-v3-sva.c |
      29   struct mm_struct *mm;   member
      45   arm_smmu_share_asid(struct mm_struct *mm, u16 asid)   in arm_smmu_share_asid() argument
      57   if (cd->mm) {   in arm_smmu_share_asid()
      58   if (WARN_ON(cd->mm != mm))   in arm_smmu_share_asid()
      92   static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)   in arm_smmu_alloc_shared_cd() argument
      101  mmgrab(mm);   in arm_smmu_alloc_shared_cd()
      103  asid = arm64_mm_context_get(mm);   in arm_smmu_alloc_shared_cd()
      118  ret = arm_smmu_share_asid(mm, asid);   in arm_smmu_alloc_shared_cd()
      156  cd->ttbr = virt_to_phys(mm->pgd);   in arm_smmu_alloc_shared_cd()
      164  cd->mm = mm;   in arm_smmu_alloc_shared_cd()
      [all …]

/drivers/acpi/acpica/

D | exregion.c |
      44   struct acpi_mem_mapping *mm = mem_info->cur_mm;   in acpi_ex_system_memory_space_handler() local
      99   if (!mm || (address < mm->physical_address) ||   in acpi_ex_system_memory_space_handler()
      100  ((u64) address + length > (u64) mm->physical_address + mm->length)) {   in acpi_ex_system_memory_space_handler()
      108  for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {   in acpi_ex_system_memory_space_handler()
      109  if (mm == mem_info->cur_mm)   in acpi_ex_system_memory_space_handler()
      112  if (address < mm->physical_address)   in acpi_ex_system_memory_space_handler()
      116  (u64) mm->physical_address + mm->length)   in acpi_ex_system_memory_space_handler()
      119  mem_info->cur_mm = mm;   in acpi_ex_system_memory_space_handler()
      124  mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));   in acpi_ex_system_memory_space_handler()
      125  if (!mm) {   in acpi_ex_system_memory_space_handler()
      [all …]

/drivers/iommu/

D | iommu-sva.c |
      15   static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)   in iommu_sva_alloc_pasid() argument
      20   if (!arch_pgtable_dma_compat(mm))   in iommu_sva_alloc_pasid()
      25   if (mm_valid_pasid(mm)) {   in iommu_sva_alloc_pasid()
      26   if (mm->pasid >= dev->iommu->max_pasids)   in iommu_sva_alloc_pasid()
      36   mm->pasid = pasid;   in iommu_sva_alloc_pasid()
      59   struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)   in iommu_sva_bind_device() argument
      66   ret = iommu_sva_alloc_pasid(mm, dev);   in iommu_sva_bind_device()
      76   domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,   in iommu_sva_bind_device()
      89   domain = iommu_sva_domain_alloc(dev, mm);   in iommu_sva_bind_device()
      95   ret = iommu_attach_device_pasid(domain, dev, mm->pasid);   in iommu_sva_bind_device()
      [all …]
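
iommu-sva.c is the shared-virtual-addressing front end: iommu_sva_bind_device() allocates a PASID for the mm and attaches an SVA domain for it. Below is a hedged sketch of how a device driver consumes this for the current process; iommu_sva_get_pasid() and iommu_sva_unbind_device() are assumed from include/linux/iommu.h rather than taken from the hits above, and example_sva_bind() is hypothetical.

/* Hedged sketch: bind the calling process's address space to a device
 * for SVA, read back the PASID to program into the hardware, unbind. */
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int example_sva_bind(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);	/* tag DMA with this PASID */
	dev_info(dev, "bound to PASID %u\n", pasid);

	iommu_sva_unbind_device(handle);
	return 0;
}
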

/drivers/gpu/drm/i915/gvt/

D | gtt.c |
      549  static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,   in _ppgtt_get_root_entry() argument
      553  const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;   in _ppgtt_get_root_entry()
      555  GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);   in _ppgtt_get_root_entry()
      557  entry->type = mm->ppgtt_mm.root_entry_type;   in _ppgtt_get_root_entry()
      558  pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :   in _ppgtt_get_root_entry()
      559  mm->ppgtt_mm.shadow_pdps,   in _ppgtt_get_root_entry()
      560  entry, index, false, 0, mm->vgpu);   in _ppgtt_get_root_entry()
      564  static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,   in ppgtt_get_guest_root_entry() argument
      567  _ppgtt_get_root_entry(mm, entry, index, true);   in ppgtt_get_guest_root_entry()
      570  static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,   in ppgtt_get_shadow_root_entry() argument
      [all …]

/drivers/gpu/drm/i915/selftests/

D | mock_region.c |
      18   i915_refct_sgt_put(obj->mm.rsgt);   in mock_region_put_pages()
      19   obj->mm.rsgt = NULL;   in mock_region_put_pages()
      20   intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);   in mock_region_put_pages()
      28   obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,   in mock_region_get_pages()
      32   if (IS_ERR(obj->mm.res))   in mock_region_get_pages()
      33   return PTR_ERR(obj->mm.res);   in mock_region_get_pages()
      35   obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,   in mock_region_get_pages()
      36   obj->mm.res,   in mock_region_get_pages()
      37   obj->mm.region->min_page_size);   in mock_region_get_pages()
      38   if (IS_ERR(obj->mm.rsgt)) {   in mock_region_get_pages()
      [all …]

/drivers/gpu/drm/i915/

D | i915_ttm_buddy_manager.c |
      19   struct drm_buddy mm;   member
      41   struct drm_buddy *mm = &bman->mm;   in i915_ttm_buddy_man_alloc() local
      57   bman_res->mm = mm;   in i915_ttm_buddy_man_alloc()
      72   GEM_BUG_ON(min_page_size < mm->chunk_size);   in i915_ttm_buddy_man_alloc()
      82   pages = size >> ilog2(mm->chunk_size);   in i915_ttm_buddy_man_alloc()
      92   n_pages = size >> ilog2(mm->chunk_size);   in i915_ttm_buddy_man_alloc()
      101  err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,   in i915_ttm_buddy_man_alloc()
      113  drm_buddy_block_trim(mm,   in i915_ttm_buddy_man_alloc()
      129  (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);   in i915_ttm_buddy_man_alloc()
      146  drm_buddy_free_list(mm, &bman_res->blocks);   in i915_ttm_buddy_man_alloc()
      [all …]

/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_amdkfd_fence.c |
      64   struct mm_struct *mm,   in amdgpu_amdkfd_fence_create() argument
      74   mmgrab(mm);   in amdgpu_amdkfd_fence_create()
      75   fence->mm = mm;   in amdgpu_amdkfd_fence_create()
      130  if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))   in amdkfd_fence_enable_signaling()
      157  mmdrop(fence->mm);   in amdkfd_fence_release()
      171  bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)   in amdkfd_fence_check_mm() argument
      177  else if (fence->mm == mm && !fence->svm_bo)   in amdkfd_fence_check_mm()
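
The amdgpu_amdkfd_fence.c entry shows the mm lifetime rule used throughout these drivers: an object that stores an mm_struct pointer takes mmgrab() when it is created and releases it with mmdrop() when it dies, which keeps the mm_struct itself valid without pinning the address space contents. A hedged, minimal restatement of that pattern with a hypothetical holder struct:

/* Hedged sketch of the mmgrab()/mmdrop() pairing seen in
 * amdgpu_amdkfd_fence_create()/amdkfd_fence_release(); struct mm_holder
 * is hypothetical. mmgrab() pins only the mm_struct, not its pages. */
#include <linux/sched/mm.h>
#include <linux/slab.h>

struct mm_holder {
	struct mm_struct *mm;
};

static struct mm_holder *mm_holder_create(struct mm_struct *mm)
{
	struct mm_holder *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;
	mmgrab(mm);	/* keep the mm_struct alive while we hold the pointer */
	h->mm = mm;
	return h;
}

static void mm_holder_release(struct mm_holder *h)
{
	mmdrop(h->mm);	/* balance mmgrab() */
	kfree(h);
}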