/drivers/gpu/drm/i915/selftests/ |
D | i915_buddy.c |
    11   static void __igt_dump_block(struct i915_buddy_mm *mm,    [in __igt_dump_block(), argument]
    20   i915_buddy_block_size(mm, block),    [in __igt_dump_block()]
    25   static void igt_dump_block(struct i915_buddy_mm *mm,    [in igt_dump_block(), argument]
    30   __igt_dump_block(mm, block, false);    [in igt_dump_block()]
    34   __igt_dump_block(mm, buddy, true);    [in igt_dump_block()]
    37   static int igt_check_block(struct i915_buddy_mm *mm,    [in igt_check_block(), argument]
    55   block_size = i915_buddy_block_size(mm, block);    [in igt_check_block()]
    58   if (block_size < mm->chunk_size) {    [in igt_check_block()]
    68   if (!IS_ALIGNED(block_size, mm->chunk_size)) {    [in igt_check_block()]
    73   if (!IS_ALIGNED(offset, mm->chunk_size)) {    [in igt_check_block()]
    [all …]
|
/drivers/gpu/drm/i915/gem/ |
D | i915_gem_shrinker.c |
    36   return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;    [in can_release_pages()]
    59   switch (obj->mm.madv) {    [in try_to_writeback()]
    105  { &i915->mm.purge_list, ~0u },    [in i915_gem_shrink()]
    107  &i915->mm.shrink_list,    [in i915_gem_shrink()]
    179  spin_lock_irqsave(&i915->mm.obj_lock, flags);    [in i915_gem_shrink()]
    183  mm.link))) {    [in i915_gem_shrink()]
    184  list_move_tail(&obj->mm.link, &still_in_list);    [in i915_gem_shrink()]
    187  !is_vmalloc_addr(obj->mm.mapping))    [in i915_gem_shrink()]
    200  spin_unlock_irqrestore(&i915->mm.obj_lock, flags);    [in i915_gem_shrink()]
    204  mutex_lock(&obj->mm.lock);    [in i915_gem_shrink()]
    [all …]
|
D | i915_gem_userptr.c |
    19   struct mm_struct *mm;    [member]
    35   struct i915_mm_struct *mm;    [member]
    158  i915_mmu_notifier_create(struct i915_mm_struct *mm)    [in i915_mmu_notifier_create(), argument]
    169  mn->mm = mm;    [in i915_mmu_notifier_create()]
    190  i915_mmu_notifier_find(struct i915_mm_struct *mm)    [in i915_mmu_notifier_find(), argument]
    195  mn = READ_ONCE(mm->mn);    [in i915_mmu_notifier_find()]
    199  mn = i915_mmu_notifier_create(mm);    [in i915_mmu_notifier_find()]
    203  err = mmu_notifier_register(&mn->mn, mm->mm);    [in i915_mmu_notifier_find()]
    209  old = cmpxchg(&mm->mn, NULL, mn);    [in i915_mmu_notifier_find()]
    211  mmu_notifier_unregister(&mn->mn, mm->mm);    [in i915_mmu_notifier_find()]
    [all …]
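
The i915_mmu_notifier_find() excerpt above captures a lockless publish pattern: build a notifier, register it, then try to install it with cmpxchg(&mm->mn, NULL, mn) and, if another thread won the race, unregister and discard the local copy. Below is a minimal, self-contained sketch of that publish-or-discard idiom using C11 atomics in place of the kernel's cmpxchg(); the struct notifier type and the make_notifier() helper are hypothetical stand-ins, not i915 code.

/* Simplified model of the install race handled in i915_mmu_notifier_find();
 * illustrative only, not the kernel implementation. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct notifier { int id; };                    /* hypothetical payload */

static struct notifier *make_notifier(void)     /* stands in for i915_mmu_notifier_create() */
{
    struct notifier *mn = malloc(sizeof(*mn));

    if (mn)
        mn->id = 1;
    return mn;
}

/* One shared slot, NULL until the first caller publishes a notifier. */
static _Atomic(struct notifier *) slot;

static struct notifier *find_or_create(void)
{
    struct notifier *expected = NULL;
    struct notifier *mn = atomic_load(&slot);

    if (mn)                                     /* fast path: already published */
        return mn;

    mn = make_notifier();
    if (!mn)
        return NULL;

    /* Try to install ours; if someone beat us to it, drop ours and
     * return the winner's (the kernel unregisters and frees here). */
    if (!atomic_compare_exchange_strong(&slot, &expected, mn)) {
        free(mn);
        mn = expected;
    }
    return mn;
}

int main(void)
{
    struct notifier *mn = find_or_create();

    printf("notifier %p id %d\n", (void *)mn, mn ? mn->id : -1);
    free(mn);
    return 0;
}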
|
D | i915_gem_pages.c |
    23   lockdep_assert_held(&obj->mm.lock);    [in __i915_gem_object_set_pages()]
    26   obj->mm.madv = I915_MADV_DONTNEED;    [in __i915_gem_object_set_pages()]
    36   obj->mm.get_page.sg_pos = pages->sgl;    [in __i915_gem_object_set_pages()]
    37   obj->mm.get_page.sg_idx = 0;    [in __i915_gem_object_set_pages()]
    39   obj->mm.pages = pages;    [in __i915_gem_object_set_pages()]
    43   GEM_BUG_ON(obj->mm.quirked);    [in __i915_gem_object_set_pages()]
    45   obj->mm.quirked = true;    [in __i915_gem_object_set_pages()]
    49   obj->mm.page_sizes.phys = sg_page_sizes;    [in __i915_gem_object_set_pages()]
    59   obj->mm.page_sizes.sg = 0;    [in __i915_gem_object_set_pages()]
    61   if (obj->mm.page_sizes.phys & ~0u << i)    [in __i915_gem_object_set_pages()]
    [all …]
|
D | i915_gem_region.c |
    15   __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);    [in i915_gem_object_put_pages_buddy()]
    17   obj->mm.dirty = false;    [in i915_gem_object_put_pages_buddy()]
    25   struct intel_memory_region *mem = obj->mm.region;    [in i915_gem_object_get_pages_buddy()]
    26   struct list_head *blocks = &obj->mm.blocks;    [in i915_gem_object_get_pages_buddy()]
    40   if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {    [in i915_gem_object_get_pages_buddy()]
    64   i915_buddy_block_size(&mem->mm, block));    [in i915_gem_object_get_pages_buddy()]
    108  INIT_LIST_HEAD(&obj->mm.blocks);    [in i915_gem_object_init_memory_region()]
    109  obj->mm.region = intel_memory_region_get(mem);    [in i915_gem_object_init_memory_region()]
    118  list_add(&obj->mm.region_link, &mem->objects.purgeable);    [in i915_gem_object_init_memory_region()]
    120  list_add(&obj->mm.region_link, &mem->objects.list);    [in i915_gem_object_init_memory_region()]
    [all …]
|
D | i915_gem_pm.c |
    39   mm.link);    [in first_mm_object()]
    46   &i915->mm.shrink_list,    [in i915_gem_suspend_late()]
    47   &i915->mm.purge_list,    [in i915_gem_suspend_late()]
    74   spin_lock_irqsave(&i915->mm.obj_lock, flags);    [in i915_gem_suspend_late()]
    79   list_move_tail(&obj->mm.link, &keep);    [in i915_gem_suspend_late()]
    85   spin_unlock_irqrestore(&i915->mm.obj_lock, flags);    [in i915_gem_suspend_late()]
    93   spin_lock_irqsave(&i915->mm.obj_lock, flags);    [in i915_gem_suspend_late()]
    98   spin_unlock_irqrestore(&i915->mm.obj_lock, flags);    [in i915_gem_suspend_late()]
|
/drivers/gpu/drm/i915/ |
D | i915_buddy.c |
    75   static void mark_free(struct i915_buddy_mm *mm,    [in mark_free(), argument]
    82   &mm->free_list[i915_buddy_block_order(block)]);    [in mark_free()]
    93   int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)    [in i915_buddy_init(), argument]
    109  mm->size = size;    [in i915_buddy_init()]
    110  mm->chunk_size = chunk_size;    [in i915_buddy_init()]
    111  mm->max_order = ilog2(size) - ilog2(chunk_size);    [in i915_buddy_init()]
    113  GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);    [in i915_buddy_init()]
    115  mm->free_list = kmalloc_array(mm->max_order + 1,    [in i915_buddy_init()]
    118  if (!mm->free_list)    [in i915_buddy_init()]
    121  for (i = 0; i <= mm->max_order; ++i)    [in i915_buddy_init()]
    [all …]
|
D | i915_buddy.h |
    107  i915_buddy_block_size(struct i915_buddy_mm *mm,    [in i915_buddy_block_size(), argument]
    110  return mm->chunk_size << i915_buddy_block_order(block);    [in i915_buddy_block_size()]
    113  int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
    115  void i915_buddy_fini(struct i915_buddy_mm *mm);
    118  i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
    120  int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
    124  void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
    126  void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
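
Read together, the i915_buddy.h declarations above outline the allocator's life cycle: i915_buddy_init() sizes the free lists, i915_buddy_alloc() hands out a power-of-two block of the requested order, i915_buddy_free() returns it, and i915_buddy_fini() tears the state down. The sketch below is illustrative only and assumes the surrounding i915 tree for types and helpers (SZ_1G, SZ_4K, IS_ERR(), pr_info()); the struct i915_buddy_block * return type and ERR_PTR() error convention of i915_buddy_alloc() are inferred rather than shown in the excerpt.

/* Hedged usage sketch of the i915 buddy allocator API excerpted above;
 * only compiles inside the i915 tree and is not the driver's own code. */
static int example_buddy_roundtrip(void)
{
        struct i915_buddy_mm mm;
        struct i915_buddy_block *block;
        int err;

        /* Manage a 1 GiB range in 4 KiB minimum chunks, so
         * max_order = ilog2(SZ_1G) - ilog2(SZ_4K) = 30 - 12 = 18. */
        err = i915_buddy_init(&mm, SZ_1G, SZ_4K);
        if (err)
                return err;

        /* Order 4 means 2^4 chunks: i915_buddy_block_size() is
         * chunk_size << order, i.e. 16 * 4 KiB = 64 KiB here. */
        block = i915_buddy_alloc(&mm, 4);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto out_fini;
        }

        pr_info("allocated %llu bytes\n", i915_buddy_block_size(&mm, block));

        i915_buddy_free(&mm, block);
out_fini:
        i915_buddy_fini(&mm);
        return err;
}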
|
/drivers/gpu/drm/ |
D | drm_mm.c |
    118  static void show_leaks(struct drm_mm *mm)    [in show_leaks(), argument]
    129  list_for_each_entry(node, drm_mm_nodes(mm), node_list) {    [in show_leaks()]
    149  static void show_leaks(struct drm_mm *mm) { }    [in show_leaks(), argument]
    160  __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)    [in INTERVAL_TREE_DEFINE()]
    162  return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,    [in INTERVAL_TREE_DEFINE()]
    163  start, last) ?: (struct drm_mm_node *)&mm->head_node;    [in INTERVAL_TREE_DEFINE()]
    170  struct drm_mm *mm = hole_node->mm;    [in drm_mm_interval_tree_add_node(), local]
    193  link = &mm->interval_tree.rb_root.rb_node;    [in drm_mm_interval_tree_add_node()]
    211  rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,    [in drm_mm_interval_tree_add_node()]
    271  struct drm_mm *mm = node->mm;    [in add_hole(), local]
    [all …]
|
/drivers/gpu/drm/selftests/ |
D | test-drm_mm.c |
    54   static bool assert_no_holes(const struct drm_mm *mm)    [in assert_no_holes(), argument]
    61   drm_mm_for_each_hole(hole, mm, hole_start, hole_end)    [in assert_no_holes()]
    68   drm_mm_for_each_node(hole, mm) {    [in assert_no_holes()]
    78   static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)    [in assert_one_hole(), argument]
    89   drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {    [in assert_one_hole()]
    107  static bool assert_continuous(const struct drm_mm *mm, u64 size)    [in assert_continuous(), argument]
    113  if (!assert_no_holes(mm))    [in assert_continuous()]
    118  drm_mm_for_each_node(node, mm) {    [in assert_continuous()]
    137  drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {    [in assert_continuous()]
    169  static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,    [in assert_node(), argument]
    [all …]
|
/drivers/gpu/drm/nouveau/nvkm/core/ |
D | mm.c |
    26   #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
    30   nvkm_mm_dump(struct nvkm_mm *mm, const char *header)    [in nvkm_mm_dump(), argument]
    36   list_for_each_entry(node, &mm->nodes, nl_entry) {    [in nvkm_mm_dump()]
    41   list_for_each_entry(node, &mm->free, fl_entry) {    [in nvkm_mm_dump()]
    48   nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)    [in nvkm_mm_free(), argument]
    72   list_for_each_entry(prev, &mm->free, fl_entry) {    [in nvkm_mm_free()]
    86   region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)    [in region_head(), argument]
    111  nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,    [in nvkm_mm_head(), argument]
    121  list_for_each_entry(this, &mm->free, fl_entry) {    [in nvkm_mm_head()]
    131  s = roundup(s, mm->block_size);    [in nvkm_mm_head()]
    [all …]
|
/drivers/misc/cxl/ |
D | fault.c |
    84   static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,    [in cxl_fault_segment(), argument]
    90   if (!(rc = copro_calculate_slb(mm, ea, &slb))) {    [in cxl_fault_segment()]
    113  struct mm_struct *mm, u64 ea)    [in cxl_handle_segment_miss(), argument]
    120  if ((rc = cxl_fault_segment(ctx, mm, ea)))    [in cxl_handle_segment_miss()]
    131  int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)    [in cxl_handle_mm_fault(), argument]
    144  if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {    [in cxl_handle_mm_fault()]
    145  cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));    [in cxl_handle_mm_fault()]
    153  if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {    [in cxl_handle_mm_fault()]
    167  if (!mm && (get_region_id(dar) != USER_REGION_ID))    [in cxl_handle_mm_fault()]
    174  hash_page_mm(mm, dar, access, 0x300, inv_flags);    [in cxl_handle_mm_fault()]
    [all …]
|
D | cxllib.c |
    173  struct mm_struct *mm = NULL;    [in cxllib_get_PE_attributes(), local]
    185  mm = get_task_mm(task);    [in cxllib_get_PE_attributes()]
    186  if (mm == NULL)    [in cxllib_get_PE_attributes()]
    192  attr->pid = mm->context.id;    [in cxllib_get_PE_attributes()]
    193  mmput(mm);    [in cxllib_get_PE_attributes()]
    203  static int get_vma_info(struct mm_struct *mm, u64 addr,    [in get_vma_info(), argument]
    210  mmap_read_lock(mm);    [in get_vma_info()]
    212  vma = find_vma(mm, addr);    [in get_vma_info()]
    221  mmap_read_unlock(mm);    [in get_vma_info()]
    225  int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)    [in cxllib_handle_fault(), argument]
    [all …]
|
/drivers/acpi/acpica/ |
D | exregion.c |
    44   struct acpi_mem_mapping *mm = mem_info->cur_mm;    [in acpi_ex_system_memory_space_handler(), local]
    100  if (!mm || (address < mm->physical_address) ||    [in acpi_ex_system_memory_space_handler()]
    101  ((u64) address + length > (u64) mm->physical_address + mm->length)) {    [in acpi_ex_system_memory_space_handler()]
    109  for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {    [in acpi_ex_system_memory_space_handler()]
    110  if (mm == mem_info->cur_mm)    [in acpi_ex_system_memory_space_handler()]
    113  if (address < mm->physical_address)    [in acpi_ex_system_memory_space_handler()]
    117  (u64) mm->physical_address + mm->length)    [in acpi_ex_system_memory_space_handler()]
    120  mem_info->cur_mm = mm;    [in acpi_ex_system_memory_space_handler()]
    125  mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));    [in acpi_ex_system_memory_space_handler()]
    126  if (!mm) {    [in acpi_ex_system_memory_space_handler()]
    [all …]
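
The acpi_ex_system_memory_space_handler() excerpt shows the lookup order for system-memory mappings: reuse the cached cur_mm if it still covers the requested range, otherwise walk the first_mm list for a covering entry (caching any hit), and only then allocate a fresh tracking structure. A self-contained, simplified model of that check-cache / scan-list / allocate sequence follows; the struct mapping type and helper names are hypothetical stand-ins, not ACPICA's.

/* Simplified model of the cached-mapping lookup above; illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
    uint64_t physical_address;
    uint64_t length;
    struct mapping *next;
};

struct mem_info {
    struct mapping *cur;    /* most recently used mapping */
    struct mapping *first;  /* list of all known mappings */
};

static int covers(const struct mapping *m, uint64_t address, uint32_t length)
{
    return address >= m->physical_address &&
           address + length <= m->physical_address + m->length;
}

static struct mapping *lookup_mapping(struct mem_info *info,
                                      uint64_t address, uint32_t length)
{
    struct mapping *m = info->cur;

    /* 1. Fast path: the cached mapping still covers the request. */
    if (m && covers(m, address, length))
        return m;

    /* 2. Scan the existing mappings for one that covers the range. */
    for (m = info->first; m; m = m->next) {
        if (m == info->cur)
            continue;                   /* already checked above */
        if (covers(m, address, length)) {
            info->cur = m;              /* cache the hit for next time */
            return m;
        }
    }

    /* 3. Nothing matched: create and record a new mapping
     * (the real handler maps a page-aligned window here). */
    m = calloc(1, sizeof(*m));
    if (!m)
        return NULL;
    m->physical_address = address;
    m->length = length;
    m->next = info->first;
    info->first = m;
    info->cur = m;
    return m;
}

int main(void)
{
    struct mem_info info = { 0 };
    struct mapping *m = lookup_mapping(&info, 0x1000, 16);

    if (!m)
        return 1;
    printf("mapping at %#llx, %llu bytes\n",
           (unsigned long long)m->physical_address,
           (unsigned long long)m->length);
    free(m);
    return 0;
}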
|
/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_mqd_manager_cik.c |
    44   static void update_cu_mask(struct mqd_manager *mm, void *mqd,    [in update_cu_mask(), argument]
    53   mqd_symmetrically_map_cu_mask(mm,    [in update_cu_mask()]
    87   static void init_mqd(struct mqd_manager *mm, void **mqd,    [in init_mqd(), argument]
    138  mm->update_mqd(mm, m, q);    [in init_mqd()]
    141  static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,    [in init_mqd_sdma(), argument]
    155  mm->update_mqd(mm, m, q);    [in init_mqd_sdma()]
    158  static void free_mqd(struct mqd_manager *mm, void *mqd,    [in free_mqd(), argument]
    161  kfd_gtt_sa_free(mm->dev, mqd_mem_obj);    [in free_mqd()]
    165  static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,    [in load_mqd(), argument]
    173  return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,    [in load_mqd()]
    [all …]
|
D | kfd_mqd_manager_vi.c |
    47   static void update_cu_mask(struct mqd_manager *mm, void *mqd,    [in update_cu_mask(), argument]
    56   mqd_symmetrically_map_cu_mask(mm,    [in update_cu_mask()]
    90   static void init_mqd(struct mqd_manager *mm, void **mqd,    [in init_mqd(), argument]
    137  if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {    [in init_mqd()]
    153  mm->update_mqd(mm, m, q);    [in init_mqd()]
    156  static int load_mqd(struct mqd_manager *mm, void *mqd,    [in load_mqd(), argument]
    164  return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,    [in load_mqd()]
    169  static void __update_mqd(struct mqd_manager *mm, void *mqd,    [in __update_mqd(), argument]
    228  if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)    [in __update_mqd()]
    233  update_cu_mask(mm, mqd, q);    [in __update_mqd()]
    [all …]
|
D | kfd_mqd_manager_v10.c |
    44   static void update_cu_mask(struct mqd_manager *mm, void *mqd,    [in update_cu_mask(), argument]
    53   mqd_symmetrically_map_cu_mask(mm,    [in update_cu_mask()]
    87   static void init_mqd(struct mqd_manager *mm, void **mqd,    [in init_mqd(), argument]
    123  if (mm->dev->cwsr_enabled) {    [in init_mqd()]
    139  mm->update_mqd(mm, m, q);    [in init_mqd()]
    142  static int load_mqd(struct mqd_manager *mm, void *mqd,    [in load_mqd(), argument]
    150  r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,    [in load_mqd()]
    156  static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,    [in hiq_load_mqd_kiq(), argument]
    160  return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,    [in hiq_load_mqd_kiq()]
    164  static void update_mqd(struct mqd_manager *mm, void *mqd,    [in update_mqd(), argument]
    [all …]
|
D | kfd_mqd_manager_v9.c |
    45   static void update_cu_mask(struct mqd_manager *mm, void *mqd,    [in update_cu_mask(), argument]
    54   mqd_symmetrically_map_cu_mask(mm,    [in update_cu_mask()]
    131  static void init_mqd(struct mqd_manager *mm, void **mqd,    [in init_mqd(), argument]
    176  if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {    [in init_mqd()]
    192  mm->update_mqd(mm, m, q);    [in init_mqd()]
    195  static int load_mqd(struct mqd_manager *mm, void *mqd,    [in load_mqd(), argument]
    202  return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,    [in load_mqd()]
    207  static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,    [in hiq_load_mqd_kiq(), argument]
    211  return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,    [in hiq_load_mqd_kiq()]
    215  static void update_mqd(struct mqd_manager *mm, void *mqd,    [in update_mqd(), argument]
    [all …]
|
D | kfd_mqd_manager.h |
    73   void (*init_mqd)(struct mqd_manager *mm, void **mqd,
    77   int (*load_mqd)(struct mqd_manager *mm, void *mqd,
    82   void (*update_mqd)(struct mqd_manager *mm, void *mqd,
    85   int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
    90   void (*free_mqd)(struct mqd_manager *mm, void *mqd,
    93   bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
    97   int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
    116  void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
    119  void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
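
struct mqd_manager is an ops table: each per-ASIC file above (cik, vi, v9, v10) fills in the same callbacks, and callers dispatch through the table rather than calling an ASIC-specific function directly, as in the mm->update_mqd(mm, m, q) lines in the init_mqd() excerpts. The sketch below shows that dispatch pattern in self-contained form; the callback signatures are deliberately simplified, hypothetical stand-ins, since the real prototypes are truncated in the excerpt.

/* Ops-table dispatch in the style of struct mqd_manager; simplified,
 * hypothetical signatures, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

struct mqd_manager {
    void (*init_mqd)(struct mqd_manager *mm, void **mqd);
    int  (*load_mqd)(struct mqd_manager *mm, void *mqd);
    bool (*is_occupied)(struct mqd_manager *mm, void *mqd);
};

/* One hypothetical per-ASIC implementation of the callbacks. */
static int demo_descriptor;

static void demo_init_mqd(struct mqd_manager *mm, void **mqd)
{
    (void)mm;
    *mqd = &demo_descriptor;            /* hand back some descriptor storage */
}

static int demo_load_mqd(struct mqd_manager *mm, void *mqd)
{
    (void)mm;
    printf("loading mqd %p\n", mqd);
    return 0;
}

static bool demo_is_occupied(struct mqd_manager *mm, void *mqd)
{
    (void)mm;
    (void)mqd;
    return false;
}

int main(void)
{
    struct mqd_manager mgr = {
        .init_mqd    = demo_init_mqd,
        .load_mqd    = demo_load_mqd,
        .is_occupied = demo_is_occupied,
    };
    void *mqd;

    /* Callers never hardcode an ASIC: they always go through the table. */
    mgr.init_mqd(&mgr, &mqd);
    return mgr.load_mqd(&mgr, mqd);
}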
|
/drivers/oprofile/ |
D | buffer_sync.c |
    91   struct mm_struct *mm = current->mm;    [in munmap_notify(), local]
    94   mmap_read_lock(mm);    [in munmap_notify()]
    96   mpnt = find_vma(mm, addr);    [in munmap_notify()]
    98   mmap_read_unlock(mm);    [in munmap_notify()]
    106  mmap_read_unlock(mm);    [in munmap_notify()]
    227  static unsigned long get_exec_dcookie(struct mm_struct *mm)    [in get_exec_dcookie(), argument]
    232  if (!mm)    [in get_exec_dcookie()]
    235  exe_file = get_mm_exe_file(mm);    [in get_exec_dcookie()]
    254  lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)    [in lookup_dcookie(), argument]
    259  mmap_read_lock(mm);    [in lookup_dcookie()]
    [all …]
|
/drivers/gpu/drm/i915/gvt/ |
D | gtt.c |
    552  static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,    [in _ppgtt_get_root_entry(), argument]
    556  struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;    [in _ppgtt_get_root_entry()]
    558  GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);    [in _ppgtt_get_root_entry()]
    560  entry->type = mm->ppgtt_mm.root_entry_type;    [in _ppgtt_get_root_entry()]
    561  pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :    [in _ppgtt_get_root_entry()]
    562  mm->ppgtt_mm.shadow_pdps,    [in _ppgtt_get_root_entry()]
    563  entry, index, false, 0, mm->vgpu);    [in _ppgtt_get_root_entry()]
    567  static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,    [in ppgtt_get_guest_root_entry(), argument]
    570  _ppgtt_get_root_entry(mm, entry, index, true);    [in ppgtt_get_guest_root_entry()]
    573  static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,    [in ppgtt_get_shadow_root_entry(), argument]
    [all …]
|
/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_thp.c |
    20   struct drm_mm mm;    [member]
    31   static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,    [in vmw_thp_insert_aligned(), argument]
    40   return drm_mm_insert_node_in_range(mm, node,    [in vmw_thp_insert_aligned()]
    55   struct drm_mm *mm = &rman->mm;    [in vmw_thp_get_node(), local]
    78   ret = vmw_thp_insert_aligned(mm, node, align_pages,    [in vmw_thp_get_node()]
    87   ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,    [in vmw_thp_get_node()]
    93   ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,    [in vmw_thp_get_node()]
    138  drm_mm_init(&rman->mm, 0, rman->manager.size);    [in vmw_thp_init()]
    150  struct drm_mm *mm = &rman->mm;    [in vmw_thp_fini(), local]
    159  drm_mm_clean(mm);    [in vmw_thp_fini()]
    [all …]
|
/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_amdkfd_fence.c |
    63   struct mm_struct *mm)    [in amdgpu_amdkfd_fence_create(), argument]
    72   mmgrab(mm);    [in amdgpu_amdkfd_fence_create()]
    73   fence->mm = mm;    [in amdgpu_amdkfd_fence_create()]
    125  if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))    [in amdkfd_fence_enable_signaling()]
    149  mmdrop(fence->mm);    [in amdkfd_fence_release()]
    160  bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)    [in amdkfd_fence_check_mm(), argument]
    166  else if (fence->mm == mm)    [in amdkfd_fence_check_mm()]
|
/drivers/iommu/intel/ |
D | svm.c |
    167  struct mm_struct *mm,    [in intel_invalidate_range(), argument]
    176  static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)    [in intel_mm_release(), argument]
    331  svm->mm = get_task_mm(current);    [in intel_svm_bind_gpasid()]
    339  mmput(svm->mm);    [in intel_svm_bind_gpasid()]
    449  static void load_pasid(struct mm_struct *mm, u32 pasid)    [in load_pasid(), argument]
    451  mutex_lock(&mm->context.lock);    [in load_pasid()]
    454  smp_store_release(&mm->pasid, pasid);    [in load_pasid()]
    457  on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);    [in load_pasid()]
    459  mutex_unlock(&mm->context.lock);    [in load_pasid()]
    466  struct mm_struct *mm, struct intel_svm_dev **sd)    [in intel_svm_bind_mm(), argument]
    [all …]
|
/drivers/iommu/arm/arm-smmu-v3/ |
D | arm-smmu-v3-sva.c |
    20   arm_smmu_share_asid(struct mm_struct *mm, u16 asid)    [in arm_smmu_share_asid(), argument]
    32   if (cd->mm) {    [in arm_smmu_share_asid()]
    33   if (WARN_ON(cd->mm != mm))    [in arm_smmu_share_asid()]
    68   static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)    [in arm_smmu_alloc_shared_cd(), argument]
    76   asid = arm64_mm_context_get(mm);    [in arm_smmu_alloc_shared_cd()]
    89   ret = arm_smmu_share_asid(mm, asid);    [in arm_smmu_alloc_shared_cd()]
    127  cd->ttbr = virt_to_phys(mm->pgd);    [in arm_smmu_alloc_shared_cd()]
    135  cd->mm = mm;    [in arm_smmu_alloc_shared_cd()]
    144  arm64_mm_context_put(mm);    [in arm_smmu_alloc_shared_cd()]
    153  arm64_mm_context_put(cd->mm);    [in arm_smmu_free_shared_cd()]
|