/drivers/gpu/drm/i915/selftests/ |
D | i915_buddy.c |
    11  static void __igt_dump_block(struct i915_buddy_mm *mm,  in __igt_dump_block() argument
    20  i915_buddy_block_size(mm, block),  in __igt_dump_block()
    25  static void igt_dump_block(struct i915_buddy_mm *mm,  in igt_dump_block() argument
    30  __igt_dump_block(mm, block, false);  in igt_dump_block()
    34  __igt_dump_block(mm, buddy, true);  in igt_dump_block()
    37  static int igt_check_block(struct i915_buddy_mm *mm,  in igt_check_block() argument
    55  block_size = i915_buddy_block_size(mm, block);  in igt_check_block()
    58  if (block_size < mm->chunk_size) {  in igt_check_block()
    68  if (!IS_ALIGNED(block_size, mm->chunk_size)) {  in igt_check_block()
    73  if (!IS_ALIGNED(offset, mm->chunk_size)) {  in igt_check_block()
    [all …]
|
D | mock_region.c |
    20  intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);  in mock_region_put_pages()
    35  obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,  in mock_region_get_pages()
    38  if (IS_ERR(obj->mm.res))  in mock_region_get_pages()
    39  return PTR_ERR(obj->mm.res);  in mock_region_get_pages()
    41  pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);  in mock_region_get_pages()
    52  intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);  in mock_region_get_pages()
|
/drivers/gpu/drm/i915/ |
D | i915_buddy.c |
    15  static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,  in i915_block_alloc() argument
    36  static void i915_block_free(struct i915_buddy_mm *mm,  in i915_block_free() argument
    50  static void mark_free(struct i915_buddy_mm *mm,  in mark_free() argument
    57  &mm->free_list[i915_buddy_block_order(block)]);  in mark_free()
    68  int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)  in i915_buddy_init() argument
    84  mm->size = size;  in i915_buddy_init()
    85  mm->chunk_size = chunk_size;  in i915_buddy_init()
    86  mm->max_order = ilog2(size) - ilog2(chunk_size);  in i915_buddy_init()
    88  GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);  in i915_buddy_init()
    90  mm->free_list = kmalloc_array(mm->max_order + 1,  in i915_buddy_init()
    [all …]
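For context, a minimal sketch of driving i915_buddy_init() with the signature shown at line 68; the SZ_1G/SZ_4K parameters are illustrative, and give max_order = ilog2(SZ_1G) - ilog2(SZ_4K) = 30 - 12 = 18, matching the arithmetic at line 86.

	/* Hypothetical caller; not part of the file above. */
	static int example_buddy_init(struct i915_buddy_mm *mm)
	{
		/* 1 GiB address space managed in 4 KiB minimum chunks. */
		return i915_buddy_init(mm, SZ_1G, SZ_4K);
	}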
|
D | i915_ttm_buddy_manager.c |
    18  struct i915_buddy_mm mm;  member
    37  struct i915_buddy_mm *mm = &bman->mm;  in i915_ttm_buddy_man_alloc() local
    52  bman_res->mm = mm;  in i915_ttm_buddy_man_alloc()
    61  GEM_BUG_ON(min_page_size < mm->chunk_size);  in i915_ttm_buddy_man_alloc()
    62  min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);  in i915_ttm_buddy_man_alloc()
    65  min_order = ilog2(size) - ilog2(mm->chunk_size);  in i915_ttm_buddy_man_alloc()
    68  if (size > mm->size) {  in i915_ttm_buddy_man_alloc()
    73  n_pages = size >> ilog2(mm->chunk_size);  in i915_ttm_buddy_man_alloc()
    80  GEM_BUG_ON(order > mm->max_order);  in i915_ttm_buddy_man_alloc()
    85  block = i915_buddy_alloc(mm, order);  in i915_ttm_buddy_man_alloc()
    [all …]
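For context, a minimal sketch of the size-to-order conversion and allocation visible in i915_ttm_buddy_man_alloc(); the ERR_PTR-style error handling is an assumption, and the size is assumed to be a chunk-aligned power of two.

	/* Hypothetical caller; not part of the file above. */
	static int example_buddy_alloc_one(struct i915_buddy_mm *mm, u64 size)
	{
		struct i915_buddy_block *block;
		unsigned int order;

		if (size > mm->size)
			return -E2BIG;

		/* Bytes to buddy order, as at lines 62-65. */
		order = ilog2(size) - ilog2(mm->chunk_size);
		GEM_BUG_ON(order > mm->max_order);

		block = i915_buddy_alloc(mm, order);
		if (IS_ERR(block))
			return PTR_ERR(block);

		/* block now covers (1ULL << order) * mm->chunk_size bytes. */
		return 0;
	}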
|
/drivers/gpu/drm/i915/gem/ |
D | i915_gem_shrinker.c |
    37  return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;  in can_release_pages()
    62  switch (obj->mm.madv) {  in try_to_writeback()
    111  { &i915->mm.purge_list, ~0u },  in i915_gem_shrink()
    113  &i915->mm.shrink_list,  in i915_gem_shrink()
    189  spin_lock_irqsave(&i915->mm.obj_lock, flags);  in i915_gem_shrink()
    193  mm.link))) {  in i915_gem_shrink()
    194  list_move_tail(&obj->mm.link, &still_in_list);  in i915_gem_shrink()
    197  !is_vmalloc_addr(obj->mm.mapping))  in i915_gem_shrink()
    210  spin_unlock_irqrestore(&i915->mm.obj_lock, flags);  in i915_gem_shrink()
    238  spin_lock_irqsave(&i915->mm.obj_lock, flags);  in i915_gem_shrink()
    [all …]
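For context, a minimal sketch of the walk pattern in i915_gem_shrink(): detach objects from the shared list under the IRQ-safe lock, park them on a local list, and splice them back afterwards. The final list_splice_tail() is an assumption; the hits show only the lock/move steps.

	/* Hypothetical walker; not part of the file above. */
	static void example_shrink_walk(struct drm_i915_private *i915,
					struct list_head *list)
	{
		struct drm_i915_gem_object *obj;
		LIST_HEAD(still_in_list);
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while ((obj = list_first_entry_or_null(list, typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);
			/* ...drop the lock, try to reclaim obj, retake it... */
		}
		list_splice_tail(&still_in_list, list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}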
|
D | i915_gem_pages.c |
    27  obj->mm.madv = I915_MADV_DONTNEED;  in __i915_gem_object_set_pages()
    37  obj->mm.get_page.sg_pos = pages->sgl;  in __i915_gem_object_set_pages()
    38  obj->mm.get_page.sg_idx = 0;  in __i915_gem_object_set_pages()
    39  obj->mm.get_dma_page.sg_pos = pages->sgl;  in __i915_gem_object_set_pages()
    40  obj->mm.get_dma_page.sg_idx = 0;  in __i915_gem_object_set_pages()
    42  obj->mm.pages = pages;  in __i915_gem_object_set_pages()
    45  obj->mm.page_sizes.phys = sg_page_sizes;  in __i915_gem_object_set_pages()
    55  obj->mm.page_sizes.sg = 0;  in __i915_gem_object_set_pages()
    57  if (obj->mm.page_sizes.phys & ~0u << i)  in __i915_gem_object_set_pages()
    58  obj->mm.page_sizes.sg |= BIT(i);  in __i915_gem_object_set_pages()
    [all …]
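For context, the bit arithmetic at lines 57-58 marks page size 2^i as usable whenever some physical chunk of at least that size exists (phys has a set bit at position i or above). A minimal sketch, with the set of candidate orders assumed since the hits elide the loop header:

	/* Hypothetical helper; not part of the file above. */
	static unsigned int example_sg_page_sizes(unsigned int phys,
						  unsigned long supported)
	{
		unsigned int sg = 0;
		unsigned int i;

		for_each_set_bit(i, &supported, BITS_PER_LONG)
			if (phys & ~0u << i)	/* any chunk >= 2^i bytes? */
				sg |= BIT(i);

		return sg;
	}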
|
D | i915_gem_object.c |
    71  INIT_LIST_HEAD(&obj->mm.link);  in i915_gem_object_init()
    85  obj->mm.madv = I915_MADV_WILLNEED;  in i915_gem_object_init()
    86  INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);  in i915_gem_object_init()
    87  mutex_init(&obj->mm.get_page.lock);  in i915_gem_object_init()
    88  INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);  in i915_gem_object_init()
    89  mutex_init(&obj->mm.get_dma_page.lock);  in i915_gem_object_init()
    180  GEM_BUG_ON(!atomic_read(&i915->mm.free_count));  in __i915_gem_free_object_rcu()
    181  atomic_dec(&i915->mm.free_count);  in __i915_gem_free_object_rcu()
    244  atomic_set(&obj->mm.pages_pin_count, 0);  in __i915_gem_free_object()
    257  if (obj->mm.n_placements > 1)  in __i915_gem_free_object()
    [all …]
|
/drivers/gpu/drm/ |
D | drm_mm.c |
    118  static void show_leaks(struct drm_mm *mm)  in show_leaks() argument
    129  list_for_each_entry(node, drm_mm_nodes(mm), node_list) {  in show_leaks()
    149  static void show_leaks(struct drm_mm *mm) { }  in show_leaks() argument
    160  __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)  in INTERVAL_TREE_DEFINE()
    162  return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,  in INTERVAL_TREE_DEFINE()
    163  start, last) ?: (struct drm_mm_node *)&mm->head_node;  in INTERVAL_TREE_DEFINE()
    170  struct drm_mm *mm = hole_node->mm;  in drm_mm_interval_tree_add_node() local
    193  link = &mm->interval_tree.rb_root.rb_node;  in drm_mm_interval_tree_add_node()
    211  rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,  in drm_mm_interval_tree_add_node()
    271  struct drm_mm *mm = node->mm;  in add_hole() local
    [all …]
|
/drivers/gpu/drm/selftests/ |
D | test-drm_mm.c |
    54  static bool assert_no_holes(const struct drm_mm *mm)  in assert_no_holes() argument
    61  drm_mm_for_each_hole(hole, mm, hole_start, hole_end)  in assert_no_holes()
    68  drm_mm_for_each_node(hole, mm) {  in assert_no_holes()
    78  static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)  in assert_one_hole() argument
    89  drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {  in assert_one_hole()
    107  static bool assert_continuous(const struct drm_mm *mm, u64 size)  in assert_continuous() argument
    113  if (!assert_no_holes(mm))  in assert_continuous()
    118  drm_mm_for_each_node(node, mm) {  in assert_continuous()
    137  drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {  in assert_continuous()
    169  static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,  in assert_node() argument
    [all …]
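For context, a minimal sketch of the two iterators these selftests lean on; the counting itself is illustrative.

	/* Hypothetical helper; not part of the file above. */
	static void example_count(const struct drm_mm *mm)
	{
		struct drm_mm_node *node;
		u64 hole_start, hole_end;
		unsigned int nodes = 0, holes = 0;

		drm_mm_for_each_node(node, mm)
			nodes++;

		drm_mm_for_each_hole(node, mm, hole_start, hole_end)
			holes++;

		pr_info("%u nodes, %u holes\n", nodes, holes);
	}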
|
/drivers/gpu/drm/nouveau/nvkm/core/ |
D | mm.c |
    26  #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
    30  nvkm_mm_dump(struct nvkm_mm *mm, const char *header)  in nvkm_mm_dump() argument
    36  list_for_each_entry(node, &mm->nodes, nl_entry) {  in nvkm_mm_dump()
    41  list_for_each_entry(node, &mm->free, fl_entry) {  in nvkm_mm_dump()
    48  nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)  in nvkm_mm_free() argument
    72  list_for_each_entry(prev, &mm->free, fl_entry) {  in nvkm_mm_free()
    86  region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)  in region_head() argument
    111  nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,  in nvkm_mm_head() argument
    121  list_for_each_entry(this, &mm->free, fl_entry) {  in nvkm_mm_head()
    131  s = roundup(s, mm->block_size);  in nvkm_mm_head()
    [all …]
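For context, a minimal sketch of the two list walks in nvkm_mm_dump(); the offset/length field names are assumptions, not visible in the hits.

	/* Hypothetical dumper; not part of the file above. */
	static void example_nvkm_dump(struct nvkm_mm *mm)
	{
		struct nvkm_mm_node *node;

		list_for_each_entry(node, &mm->nodes, nl_entry)
			pr_info("node: %08x +%08x\n", node->offset, node->length);

		list_for_each_entry(node, &mm->free, fl_entry)
			pr_info("free: %08x +%08x\n", node->offset, node->length);
	}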
|
/drivers/misc/cxl/ |
D | fault.c |
    84  static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,  in cxl_fault_segment() argument
    90  if (!(rc = copro_calculate_slb(mm, ea, &slb))) {  in cxl_fault_segment()
    113  struct mm_struct *mm, u64 ea)  in cxl_handle_segment_miss() argument
    120  if ((rc = cxl_fault_segment(ctx, mm, ea)))  in cxl_handle_segment_miss()
    131  int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)  in cxl_handle_mm_fault() argument
    144  if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {  in cxl_handle_mm_fault()
    145  cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));  in cxl_handle_mm_fault()
    153  if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {  in cxl_handle_mm_fault()
    167  if (!mm && (get_region_id(dar) != USER_REGION_ID))  in cxl_handle_mm_fault()
    174  hash_page_mm(mm, dar, access, 0x300, inv_flags);  in cxl_handle_mm_fault()
    [all …]
|
D | cxllib.c |
    183  struct mm_struct *mm = get_task_mm(task);  in cxllib_get_PE_attributes() local
    184  if (mm == NULL)  in cxllib_get_PE_attributes()
    190  attr->pid = mm->context.id;  in cxllib_get_PE_attributes()
    191  mmput(mm);  in cxllib_get_PE_attributes()
    201  static int get_vma_info(struct mm_struct *mm, u64 addr,  in get_vma_info() argument
    208  mmap_read_lock(mm);  in get_vma_info()
    210  vma = find_vma(mm, addr);  in get_vma_info()
    219  mmap_read_unlock(mm);  in get_vma_info()
    223  int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)  in cxllib_handle_fault() argument
    229  if (mm == NULL)  in cxllib_handle_fault()
    [all …]
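For context, a minimal sketch of the locked VMA lookup in get_vma_info(); returning the span is illustrative. Note that find_vma() may return a VMA that starts above addr, which the real caller checks for.

	/* Hypothetical helper; not part of the file above. */
	static int example_vma_span(struct mm_struct *mm, u64 addr,
				    u64 *start, u64 *end)
	{
		struct vm_area_struct *vma;
		int rc = 0;

		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			rc = -EFAULT;
		} else {
			*start = vma->vm_start;
			*end = vma->vm_end;
		}
		mmap_read_unlock(mm);
		return rc;
	}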
|
/drivers/iommu/arm/arm-smmu-v3/ |
D | arm-smmu-v3-sva.c |
    29  struct mm_struct *mm;  member
    45  arm_smmu_share_asid(struct mm_struct *mm, u16 asid)  in arm_smmu_share_asid() argument
    57  if (cd->mm) {  in arm_smmu_share_asid()
    58  if (WARN_ON(cd->mm != mm))  in arm_smmu_share_asid()
    92  static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)  in arm_smmu_alloc_shared_cd() argument
    101  mmgrab(mm);  in arm_smmu_alloc_shared_cd()
    103  asid = arm64_mm_context_get(mm);  in arm_smmu_alloc_shared_cd()
    118  ret = arm_smmu_share_asid(mm, asid);  in arm_smmu_alloc_shared_cd()
    156  cd->ttbr = virt_to_phys(mm->pgd);  in arm_smmu_alloc_shared_cd()
    164  cd->mm = mm;  in arm_smmu_alloc_shared_cd()
    [all …]
|
/drivers/acpi/acpica/ |
D | exregion.c |
    44  struct acpi_mem_mapping *mm = mem_info->cur_mm;  in acpi_ex_system_memory_space_handler() local
    100  if (!mm || (address < mm->physical_address) ||  in acpi_ex_system_memory_space_handler()
    101  ((u64) address + length > (u64) mm->physical_address + mm->length)) {  in acpi_ex_system_memory_space_handler()
    109  for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {  in acpi_ex_system_memory_space_handler()
    110  if (mm == mem_info->cur_mm)  in acpi_ex_system_memory_space_handler()
    113  if (address < mm->physical_address)  in acpi_ex_system_memory_space_handler()
    117  (u64) mm->physical_address + mm->length)  in acpi_ex_system_memory_space_handler()
    120  mem_info->cur_mm = mm;  in acpi_ex_system_memory_space_handler()
    125  mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));  in acpi_ex_system_memory_space_handler()
    126  if (!mm) {  in acpi_ex_system_memory_space_handler()
    [all …]
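For context, a minimal sketch of the mapping-cache logic in acpi_ex_system_memory_space_handler(): try the cached mapping, then rescan the list and re-cache a hit; on a miss the real handler allocates a new mapping with ACPI_ALLOCATE_ZEROED(). The mem_info type name is an assumption.

	/* Hypothetical lookup; not part of the file above. */
	static struct acpi_mem_mapping *
	example_find_mapping(struct acpi_mem_space_context *mem_info,
			     acpi_physical_address address, u32 length)
	{
		struct acpi_mem_mapping *mm = mem_info->cur_mm;

		if (mm && address >= mm->physical_address &&
		    (u64)address + length <=
		    (u64)mm->physical_address + mm->length)
			return mm;

		for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {
			if (mm == mem_info->cur_mm)
				continue;	/* already rejected above */
			if (address < mm->physical_address)
				continue;
			if ((u64)address + length >
			    (u64)mm->physical_address + mm->length)
				continue;
			mem_info->cur_mm = mm;
			return mm;
		}

		return NULL;	/* caller maps a fresh region */
	}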
|
/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_mqd_manager_cik.c |
    44  static void update_cu_mask(struct mqd_manager *mm, void *mqd,  in update_cu_mask() argument
    53  mqd_symmetrically_map_cu_mask(mm,  in update_cu_mask()
    87  static void init_mqd(struct mqd_manager *mm, void **mqd,  in init_mqd() argument
    138  mm->update_mqd(mm, m, q);  in init_mqd()
    141  static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,  in init_mqd_sdma() argument
    155  mm->update_mqd(mm, m, q);  in init_mqd_sdma()
    158  static void free_mqd(struct mqd_manager *mm, void *mqd,  in free_mqd() argument
    161  kfd_gtt_sa_free(mm->dev, mqd_mem_obj);  in free_mqd()
    165  static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,  in load_mqd() argument
    173  return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,  in load_mqd()
    [all …]
|
D | kfd_mqd_manager_vi.c |
    47  static void update_cu_mask(struct mqd_manager *mm, void *mqd,  in update_cu_mask() argument
    56  mqd_symmetrically_map_cu_mask(mm,  in update_cu_mask()
    90  static void init_mqd(struct mqd_manager *mm, void **mqd,  in init_mqd() argument
    137  if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {  in init_mqd()
    153  mm->update_mqd(mm, m, q);  in init_mqd()
    156  static int load_mqd(struct mqd_manager *mm, void *mqd,  in load_mqd() argument
    164  return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,  in load_mqd()
    169  static void __update_mqd(struct mqd_manager *mm, void *mqd,  in __update_mqd() argument
    228  if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)  in __update_mqd()
    233  update_cu_mask(mm, mqd, q);  in __update_mqd()
    [all …]
|
D | kfd_mqd_manager_v10.c |
    44  static void update_cu_mask(struct mqd_manager *mm, void *mqd,  in update_cu_mask() argument
    53  mqd_symmetrically_map_cu_mask(mm,  in update_cu_mask()
    87  static void init_mqd(struct mqd_manager *mm, void **mqd,  in init_mqd() argument
    123  if (mm->dev->cwsr_enabled) {  in init_mqd()
    139  mm->update_mqd(mm, m, q);  in init_mqd()
    142  static int load_mqd(struct mqd_manager *mm, void *mqd,  in load_mqd() argument
    150  r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,  in load_mqd()
    156  static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,  in hiq_load_mqd_kiq() argument
    160  return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,  in hiq_load_mqd_kiq()
    164  static void update_mqd(struct mqd_manager *mm, void *mqd,  in update_mqd() argument
    [all …]
|
D | kfd_mqd_manager_v9.c |
    45  static void update_cu_mask(struct mqd_manager *mm, void *mqd,  in update_cu_mask() argument
    54  mqd_symmetrically_map_cu_mask(mm,  in update_cu_mask()
    131  static void init_mqd(struct mqd_manager *mm, void **mqd,  in init_mqd() argument
    176  if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {  in init_mqd()
    192  mm->update_mqd(mm, m, q);  in init_mqd()
    195  static int load_mqd(struct mqd_manager *mm, void *mqd,  in load_mqd() argument
    202  return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,  in load_mqd()
    207  static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,  in hiq_load_mqd_kiq() argument
    211  return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,  in hiq_load_mqd_kiq()
    215  static void update_mqd(struct mqd_manager *mm, void *mqd,  in update_mqd() argument
    [all …]
|
D | kfd_mqd_manager.h |
    73  void (*init_mqd)(struct mqd_manager *mm, void **mqd,
    77  int (*load_mqd)(struct mqd_manager *mm, void *mqd,
    82  void (*update_mqd)(struct mqd_manager *mm, void *mqd,
    85  int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
    90  void (*free_mqd)(struct mqd_manager *mm, void *mqd,
    93  bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
    97  int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
    117  void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
    120  void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
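For context, every per-ASIC file above fills this ops table and then dispatches through it (e.g. mm->update_mqd(mm, m, q) inside init_mqd()). A minimal sketch of that dispatch; the queue_properties parameter type is inferred from those calls, and the NULL check is an assumption.

	/* Hypothetical dispatcher; not part of the file above. */
	static void example_update(struct mqd_manager *mm, void *mqd,
				   struct queue_properties *q)
	{
		if (mm->update_mqd)
			mm->update_mqd(mm, mqd, q);
	}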
|
D | kfd_svm.c |
    79  svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)  in svm_range_add_notifier_locked() argument
    84  mmu_interval_notifier_insert_locked(&prange->notifier, mm,  in svm_range_add_notifier_locked()
    464  struct mm_struct *mm;  in svm_range_vram_node_new() local
    479  mm = get_task_mm(p->lead_thread);  in svm_range_vram_node_new()
    480  if (!mm) {  in svm_range_vram_node_new()
    488  mm,  in svm_range_vram_node_new()
    490  mmput(mm);  in svm_range_vram_node_new()
    965  svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,  in svm_range_add_child() argument
    971  pchild->work_item.mm = mm;  in svm_range_add_child()
    994  svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,  in svm_range_split_by_granularity() argument
    [all …]
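For context, a minimal sketch of the get_task_mm()/mmput() pairing in svm_range_vram_node_new(): the reference keeps the address space alive only between the two calls; the work in between is illustrative.

	/* Hypothetical user; not part of the file above. */
	static int example_with_task_mm(struct task_struct *task)
	{
		struct mm_struct *mm = get_task_mm(task);

		if (!mm)
			return -ESRCH;

		/* ...mm is safe to use while the reference is held... */

		mmput(mm);
		return 0;
	}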
|
/drivers/gpu/drm/i915/gvt/ |
D | gtt.c |
    552  static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,  in _ppgtt_get_root_entry() argument
    556  struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;  in _ppgtt_get_root_entry()
    558  GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);  in _ppgtt_get_root_entry()
    560  entry->type = mm->ppgtt_mm.root_entry_type;  in _ppgtt_get_root_entry()
    561  pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :  in _ppgtt_get_root_entry()
    562  mm->ppgtt_mm.shadow_pdps,  in _ppgtt_get_root_entry()
    563  entry, index, false, 0, mm->vgpu);  in _ppgtt_get_root_entry()
    567  static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,  in ppgtt_get_guest_root_entry() argument
    570  _ppgtt_get_root_entry(mm, entry, index, true);  in ppgtt_get_guest_root_entry()
    573  static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,  in ppgtt_get_shadow_root_entry() argument
    [all …]
|
/drivers/iommu/ |
D | iommu-sva-lib.c |
    26  int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)  in iommu_sva_alloc_pasid() argument
    36  if (mm->pasid) {  in iommu_sva_alloc_pasid()
    37  if (mm->pasid >= min && mm->pasid <= max)  in iommu_sva_alloc_pasid()
    38  ioasid_get(mm->pasid);  in iommu_sva_alloc_pasid()
    42  pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);  in iommu_sva_alloc_pasid()
    46  mm->pasid = pasid;  in iommu_sva_alloc_pasid()
    59  void iommu_sva_free_pasid(struct mm_struct *mm)  in iommu_sva_free_pasid() argument
    62  if (ioasid_put(mm->pasid))  in iommu_sva_free_pasid()
    63  mm->pasid = 0;  in iommu_sva_free_pasid()
    69  static bool __mmget_not_zero(void *mm)  in __mmget_not_zero() argument
    [all …]
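For context, a minimal sketch of the allocate/free pairing above; the bounds mirror the intel/svm.c caller further down (PASID_MIN, max_pasid - 1), and the device-programming step is elided.

	/* Hypothetical binder; not part of the file above. */
	static int example_sva_bind(struct mm_struct *mm, ioasid_t min,
				    ioasid_t max)
	{
		int ret = iommu_sva_alloc_pasid(mm, min, max);

		if (ret)
			return ret;

		/* ...program mm->pasid into the device... */

		iommu_sva_free_pasid(mm);
		return 0;
	}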
|
/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_amdkfd_fence.c |
    64  struct mm_struct *mm,  in amdgpu_amdkfd_fence_create() argument
    74  mmgrab(mm);  in amdgpu_amdkfd_fence_create()
    75  fence->mm = mm;  in amdgpu_amdkfd_fence_create()
    130  if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))  in amdkfd_fence_enable_signaling()
    157  mmdrop(fence->mm);  in amdkfd_fence_release()
    168  bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)  in amdkfd_fence_check_mm() argument
    174  else if (fence->mm == mm)  in amdkfd_fence_check_mm()
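For context, a minimal sketch of the mmgrab()/mmdrop() pairing in the fence code above: mmgrab() keeps the mm_struct itself alive across the object's lifetime without pinning the address space (that would be mmget()).

	/* Hypothetical holder; not part of the file above. */
	struct example_holder {
		struct mm_struct *mm;
	};

	static void example_holder_init(struct example_holder *h,
					struct mm_struct *mm)
	{
		mmgrab(mm);	/* pin the mm_struct, not its pages */
		h->mm = mm;
	}

	static void example_holder_release(struct example_holder *h)
	{
		mmdrop(h->mm);	/* balance the mmgrab() */
	}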
|
/drivers/iommu/intel/ |
D | svm.c |
    244  struct mm_struct *mm,  in intel_invalidate_range() argument
    253  static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)  in intel_mm_release() argument
    395  svm->mm = get_task_mm(current);  in intel_svm_bind_gpasid()
    403  mmput(svm->mm);  in intel_svm_bind_gpasid()
    513  static void load_pasid(struct mm_struct *mm, u32 pasid)  in load_pasid() argument
    515  mutex_lock(&mm->context.lock);  in load_pasid()
    518  on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);  in load_pasid()
    520  mutex_unlock(&mm->context.lock);  in load_pasid()
    523  static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,  in intel_svm_alloc_pasid() argument
    529  return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);  in intel_svm_alloc_pasid()
    [all …]
|
/drivers/firmware/efi/ |
D | fdtparams.c |
    82  u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)  in efi_get_fdt_params() argument
    92  [MMBASE] = { &mm->phys_map, sizeof(mm->phys_map) },  in efi_get_fdt_params()
    93  [MMSIZE] = { &mm->size, sizeof(mm->size) },  in efi_get_fdt_params()
    94  [DCSIZE] = { &mm->desc_size, sizeof(mm->desc_size) },  in efi_get_fdt_params()
    95  [DCVERS] = { &mm->desc_version, sizeof(mm->desc_version) },  in efi_get_fdt_params()
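For context, a minimal sketch of the destination-table pattern in efi_get_fdt_params(): each FDT property index pairs a pointer into struct efi_memory_map_data with its size, so one generic copy loop can fill every field. Everything except the mm fields is illustrative.

	/* Hypothetical table builder; not part of the file above. */
	struct example_param {
		void *dest;
		size_t size;
	};

	static void example_build_table(struct efi_memory_map_data *mm,
					struct example_param p[4])
	{
		p[0] = (struct example_param){ &mm->phys_map, sizeof(mm->phys_map) };
		p[1] = (struct example_param){ &mm->size, sizeof(mm->size) };
		p[2] = (struct example_param){ &mm->desc_size, sizeof(mm->desc_size) };
		p[3] = (struct example_param){ &mm->desc_version, sizeof(mm->desc_version) };
	}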
|