/drivers/gpu/drm/ |
D | drm_mm.c |
      93  static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
      98  static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
     112  struct drm_mm *mm = hole_node->mm;   in drm_mm_insert_helper() local
     120  if (mm->color_adjust)   in drm_mm_insert_helper()
     121  mm->color_adjust(hole_node, color, &adj_start, &adj_end);   in drm_mm_insert_helper()
     146  node->mm = mm;   in drm_mm_insert_helper()
     157  list_add(&node->hole_stack, &mm->hole_stack);   in drm_mm_insert_helper()
     176  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)   in drm_mm_reserve_node() argument
     186  drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {   in drm_mm_reserve_node()
     190  node->mm = mm;   in drm_mm_reserve_node()
     [all …]
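This is the core range allocator that the GPU entries below build on. A minimal, hedged sketch of claiming a fixed block with drm_mm_reserve_node(), assuming (as the reserve path above suggests) that the caller pre-fills the node's start and size; the helper name and field types are illustrative, not taken from any driver.

#include <linux/string.h>
#include <drm/drm_mm.h>

/*
 * Illustrative only: claim one specific [start, start + size) range from an
 * already-initialized drm_mm, the way i915's stolen-memory code carves out
 * fixed blocks.  Assumes drm_mm_reserve_node() honours a pre-filled node.
 */
static int example_reserve_range(struct drm_mm *mm, struct drm_mm_node *node,
				 unsigned long start, unsigned long size)
{
	memset(node, 0, sizeof(*node));
	node->start = start;
	node->size = size;

	/* Expected to fail (e.g. -ENOSPC) if the range is already in use. */
	return drm_mm_reserve_node(mm, node);
}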
|
/drivers/gpu/drm/i915/ |
D | i915_gem_userptr.c |
      36  struct mm_struct *mm;   member
      79  was_interruptible = dev_priv->mm.interruptible;   in cancel_userptr()
      80  dev_priv->mm.interruptible = false;   in cancel_userptr()
      88  dev_priv->mm.interruptible = was_interruptible;   in cancel_userptr()
     100  struct mm_struct *mm,   in invalidate_range__linear() argument
     133  struct mm_struct *mm,   in i915_gem_userptr_mn_invalidate_range_start() argument
     148  it = invalidate_range__linear(mn, mm, start, end);   in i915_gem_userptr_mn_invalidate_range_start()
     184  i915_mmu_notifier_create(struct mm_struct *mm)   in i915_mmu_notifier_create() argument
     201  ret = __mmu_notifier_register(&mn->mn, mm);   in i915_mmu_notifier_create()
     310  i915_mmu_notifier_find(struct i915_mm_struct *mm)   in i915_mmu_notifier_find() argument
     [all …]
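The userptr code here, radeon_mn.c, amd_iommu_v2.c, grutlbpurge.c and gntdev.c further down all follow the same pattern: embed a struct mmu_notifier in a per-mm tracking object, fill in an ops table, and register against the mm whose mappings the device shadows. A hedged sketch of that pattern, using the pre-4.19 callback signatures these listings show; the example_* names are illustrative, not from any driver.

#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>

/* Per-mm tracking object, analogous to i915_mmu_notifier or radeon_mn. */
struct example_mn {
	struct mmu_notifier mn;
	struct mm_struct *mm;
};

static void example_invalidate_range_start(struct mmu_notifier *mn,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	/* tear down any device mappings overlapping [start, end) */
}

static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* the mm is going away entirely; drop everything */
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range_start = example_invalidate_range_start,
	.release                = example_release,
};

/* Callers in these drivers hold mm->mmap_sem, hence __mmu_notifier_register(). */
static int example_mn_register(struct example_mn *emn, struct mm_struct *mm)
{
	emn->mm = mm;
	emn->mn.ops = &example_mn_ops;
	return __mmu_notifier_register(&emn->mn, mm);
}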
|
D | i915_gem_stolen.c |
     171  ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,   in find_compression_threshold()
     182  ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,   in find_compression_threshold()
     221  ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,   in i915_setup_compression()
     229  dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);   in i915_setup_compression()
     231  dev_priv->mm.stolen_base + compressed_llb->start);   in i915_setup_compression()
     253  if (!drm_mm_initialized(&dev_priv->mm.stolen))   in i915_gem_stolen_setup_compression()
     286  if (!drm_mm_initialized(&dev_priv->mm.stolen))   in i915_gem_cleanup_stolen()
     290  drm_mm_takedown(&dev_priv->mm.stolen);   in i915_gem_cleanup_stolen()
     309  dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);   in i915_gem_init_stolen()
     310  if (dev_priv->mm.stolen_base == 0)   in i915_gem_init_stolen()
     [all …]
|
D | i915_gem.c |
      95  spin_lock(&dev_priv->mm.object_stat_lock);   in i915_gem_info_add_obj()
      96  dev_priv->mm.object_count++;   in i915_gem_info_add_obj()
      97  dev_priv->mm.object_memory += size;   in i915_gem_info_add_obj()
      98  spin_unlock(&dev_priv->mm.object_stat_lock);   in i915_gem_info_add_obj()
     104  spin_lock(&dev_priv->mm.object_stat_lock);   in i915_gem_info_remove_obj()
     105  dev_priv->mm.object_count--;   in i915_gem_info_remove_obj()
     106  dev_priv->mm.object_memory -= size;   in i915_gem_info_remove_obj()
     107  spin_unlock(&dev_priv->mm.object_stat_lock);   in i915_gem_info_remove_obj()
     200  list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)   in i915_gem_get_aperture_ioctl()
    1180  &file_priv->mm.idle_work,   in __wait_seqno()
     [all …]
|
/drivers/gpu/drm/nouveau/core/core/ |
D | mm.c |
      28  #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
      32  nouveau_mm_dump(struct nouveau_mm *mm, const char *header)   in nouveau_mm_dump() argument
      38  list_for_each_entry(node, &mm->nodes, nl_entry) {   in nouveau_mm_dump()
      43  list_for_each_entry(node, &mm->free, fl_entry) {   in nouveau_mm_dump()
      50  nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)   in nouveau_mm_free() argument
      74  list_for_each_entry(prev, &mm->free, fl_entry) {   in nouveau_mm_free()
      88  region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)   in region_head() argument
     112  nouveau_mm_head(struct nouveau_mm *mm, u8 heap, u8 type, u32 size_max,   in nouveau_mm_head() argument
     122  list_for_each_entry(this, &mm->free, fl_entry) {   in nouveau_mm_head()
     132  s = roundup(s, mm->block_size);   in nouveau_mm_head()
     [all …]
|
/drivers/gpio/ |
D | gpio-mpc8xxx.c |
      51  to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm)   in to_mpc8xxx_gpio_chip() argument
      53  return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc);   in to_mpc8xxx_gpio_chip()
      56  static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)   in mpc8xxx_gpio_save_regs() argument
      58  struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);   in mpc8xxx_gpio_save_regs()
      60  mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT);   in mpc8xxx_gpio_save_regs()
      71  struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);   in mpc8572_gpio_get() local
      72  struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);   in mpc8572_gpio_get()
      75  out_mask = in_be32(mm->regs + GPIO_DIR);   in mpc8572_gpio_get()
      77  val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;   in mpc8572_gpio_get()
      85  struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);   in mpc8xxx_gpio_get() local
     [all …]
|
/drivers/oprofile/ |
D | buffer_sync.c |
      88  struct mm_struct *mm = current->mm;   in munmap_notify() local
      91  down_read(&mm->mmap_sem);   in munmap_notify()
      93  mpnt = find_vma(mm, addr);   in munmap_notify()
      95  up_read(&mm->mmap_sem);   in munmap_notify()
     103  up_read(&mm->mmap_sem);   in munmap_notify()
     224  static unsigned long get_exec_dcookie(struct mm_struct *mm)   in get_exec_dcookie() argument
     228  if (mm && mm->exe_file)   in get_exec_dcookie()
     229  cookie = fast_get_dcookie(&mm->exe_file->f_path);   in get_exec_dcookie()
     241  lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)   in lookup_dcookie() argument
     246  for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {   in lookup_dcookie()
     [all …]
|
/drivers/infiniband/hw/ipath/ |
D | ipath_user_pages.c |
      74  ret = get_user_pages(current, current->mm,   in __ipath_get_user_pages()
      82  current->mm->pinned_vm += num_pages;   in __ipath_get_user_pages()
     166  down_write(&current->mm->mmap_sem);   in ipath_get_user_pages()
     170  up_write(&current->mm->mmap_sem);   in ipath_get_user_pages()
     177  down_write(&current->mm->mmap_sem);   in ipath_release_user_pages()
     181  current->mm->pinned_vm -= num_pages;   in ipath_release_user_pages()
     183  up_write(&current->mm->mmap_sem);   in ipath_release_user_pages()
     188  struct mm_struct *mm;   member
     197  down_write(&work->mm->mmap_sem);   in user_pages_account()
     198  work->mm->pinned_vm -= work->num_pages;   in user_pages_account()
     [all …]
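ipath, qib, ib_umem and usnic_uiom below all pin user memory the same way: take mmap_sem for write, call the old eight-argument get_user_pages(), and charge the pages to the mm's pinned_vm (locked_vm in usnic's case). A hedged sketch of that sequence; the helper name and the write/force choices are illustrative.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Illustrative only: pin npages of the current task's user memory starting
 * at 'start' and account them, mirroring __ipath_get_user_pages() above.
 * Uses the pre-4.6 get_user_pages(tsk, mm, ...) calling convention shown
 * in these listings.
 */
static long example_pin_user_pages(unsigned long start, unsigned long npages,
				   struct page **pages)
{
	long pinned;

	down_write(&current->mm->mmap_sem);
	pinned = get_user_pages(current, current->mm, start, npages,
				1 /* write */, 0 /* force */, pages, NULL);
	if (pinned > 0)
		current->mm->pinned_vm += pinned;
	up_write(&current->mm->mmap_sem);

	return pinned;
}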
|
/drivers/misc/cxl/ |
D | fault.c |
      85  static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,   in cxl_fault_segment() argument
      91  if (!(rc = copro_calculate_slb(mm, ea, &slb))) {   in cxl_fault_segment()
     114  struct mm_struct *mm, u64 ea)   in cxl_handle_segment_miss() argument
     120  if ((rc = cxl_fault_segment(ctx, mm, ea)))   in cxl_handle_segment_miss()
     132  struct mm_struct *mm, u64 dsisr, u64 dar)   in cxl_handle_page_fault() argument
     138  if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {   in cxl_handle_page_fault()
     153  hash_page_mm(mm, dar, access, 0x300);   in cxl_handle_page_fault()
     167  struct mm_struct *mm;   in cxl_handle_fault() local
     188  if (!(mm = get_task_mm(task))) {   in cxl_handle_fault()
     196  cxl_handle_segment_miss(ctx, mm, dar);   in cxl_handle_fault()
     [all …]
|
D | main.c |
      34  static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)   in _cxl_slbia() argument
      44  if (task->mm != mm)   in _cxl_slbia()
      59  static inline void cxl_slbia_core(struct mm_struct *mm)   in cxl_slbia_core() argument
      78  _cxl_slbia(ctx, mm);   in cxl_slbia_core()
|
/drivers/infiniband/core/ |
D | umem.c |
     148  down_write(&current->mm->mmap_sem);   in ib_umem_get()
     150  locked = npages + current->mm->pinned_vm;   in ib_umem_get()
     173  ret = get_user_pages(current, current->mm, cur_base,   in ib_umem_get()
     216  current->mm->pinned_vm = locked;   in ib_umem_get()
     218  up_write(&current->mm->mmap_sem);   in ib_umem_get()
     231  down_write(&umem->mm->mmap_sem);   in ib_umem_account()
     232  umem->mm->pinned_vm -= umem->diff;   in ib_umem_account()
     233  up_write(&umem->mm->mmap_sem);   in ib_umem_account()
     234  mmput(umem->mm);   in ib_umem_account()
     245  struct mm_struct *mm;   in ib_umem_release() local
     [all …]
|
/drivers/gpu/drm/radeon/ |
D | radeon_mn.c |
      42  struct mm_struct *mm;   member
      78  mmu_notifier_unregister(&rmn->mn, rmn->mm);   in radeon_mn_destroy()
      91  struct mm_struct *mm)   in radeon_mn_release() argument
     110  struct mm_struct *mm,   in radeon_mn_invalidate_range_start() argument
     166  struct mm_struct *mm = current->mm;   in radeon_mn_get() local
     170  down_write(&mm->mmap_sem);   in radeon_mn_get()
     173  hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)   in radeon_mn_get()
     174  if (rmn->mm == mm)   in radeon_mn_get()
     184  rmn->mm = mm;   in radeon_mn_get()
     189  r = __mmu_notifier_register(&rmn->mn, mm);   in radeon_mn_get()
     [all …]
|
/drivers/gpu/drm/ttm/ |
D | ttm_bo_manager.c |
      46  struct drm_mm mm;   member
      56  struct drm_mm *mm = &rman->mm;   in ttm_bo_man_get_node() local
      74  ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,   in ttm_bo_man_get_node()
     115  drm_mm_init(&rman->mm, 0, p_size);   in ttm_bo_man_init()
     124  struct drm_mm *mm = &rman->mm;   in ttm_bo_man_takedown() local
     127  if (drm_mm_clean(mm)) {   in ttm_bo_man_takedown()
     128  drm_mm_takedown(mm);   in ttm_bo_man_takedown()
     144  drm_mm_debug_table(&rman->mm, prefix);   in ttm_bo_man_debug()
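ttm_bo_man_init() and ttm_bo_man_takedown() show the lifecycle around the embedded drm_mm: initialize it over the managed range, and only take it down once it is clean. A hedged sketch of that lifecycle under a private spinlock; the range-manager struct here is a stand-in, not TTM's own.

#include <linux/spinlock.h>
#include <drm/drm_mm.h>

/* Stand-in for the TTM range manager's private data. */
struct example_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

static void example_man_init(struct example_range_manager *rman,
			     unsigned long p_size)
{
	drm_mm_init(&rman->mm, 0, p_size);	/* manage offsets [0, p_size) */
	spin_lock_init(&rman->lock);
}

static int example_man_takedown(struct example_range_manager *rman)
{
	int ret = -EBUSY;

	spin_lock(&rman->lock);
	if (drm_mm_clean(&rman->mm)) {		/* no nodes left allocated */
		drm_mm_takedown(&rman->mm);
		ret = 0;
	}
	spin_unlock(&rman->lock);
	return ret;
}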
|
/drivers/infiniband/hw/qib/ |
D | qib_user_pages.c |
      69  ret = get_user_pages(current, current->mm,   in __qib_get_user_pages()
      77  current->mm->pinned_vm += num_pages;   in __qib_get_user_pages()
     137  down_write(&current->mm->mmap_sem);   in qib_get_user_pages()
     141  up_write(&current->mm->mmap_sem);   in qib_get_user_pages()
     148  if (current->mm) /* during close after signal, mm can be NULL */   in qib_release_user_pages()
     149  down_write(&current->mm->mmap_sem);   in qib_release_user_pages()
     153  if (current->mm) {   in qib_release_user_pages()
     154  current->mm->pinned_vm -= num_pages;   in qib_release_user_pages()
     155  up_write(&current->mm->mmap_sem);   in qib_release_user_pages()
|
/drivers/infiniband/hw/cxgb3/ |
D | iwch_provider.h |
     209  struct iwch_mm_entry *mm;   in remove_mmap() local
     214  mm = list_entry(pos, struct iwch_mm_entry, entry);   in remove_mmap()
     215  if (mm->key == key && mm->len == len) {   in remove_mmap()
     216  list_del_init(&mm->entry);   in remove_mmap()
     219  key, (unsigned long long) mm->addr, mm->len);   in remove_mmap()
     220  return mm;   in remove_mmap()
     228  struct iwch_mm_entry *mm)   in insert_mmap() argument
     232  mm->key, (unsigned long long) mm->addr, mm->len);   in insert_mmap()
     233  list_add_tail(&mm->entry, &ucontext->mmaps);   in insert_mmap()
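Both cxgb3 here and cxgb4's provider.c below keep a per-ucontext list of (key, addr, len) records so that a later mmap() call can translate its offset key back into a physical address. A hedged sketch of that bookkeeping; the struct and function names are illustrative rather than the drivers' own.

#include <linux/list.h>
#include <linux/types.h>

struct example_mm_entry {
	struct list_head entry;
	u32 key;	/* offset cookie handed to userspace */
	u64 addr;	/* physical address to map when the key comes back */
	unsigned len;
};

static void example_insert_mmap(struct list_head *mmaps,
				struct example_mm_entry *mm)
{
	list_add_tail(&mm->entry, mmaps);
}

/* Look up and detach the record matching (key, len); the caller frees it. */
static struct example_mm_entry *
example_remove_mmap(struct list_head *mmaps, u32 key, unsigned len)
{
	struct example_mm_entry *mm, *tmp;

	list_for_each_entry_safe(mm, tmp, mmaps, entry) {
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			return mm;
		}
	}
	return NULL;
}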
|
/drivers/infiniband/hw/usnic/ |
D | usnic_uiom.c |
      61  down_write(&umem->mm->mmap_sem);   in usnic_uiom_reg_account()
      62  umem->mm->locked_vm -= umem->diff;   in usnic_uiom_reg_account()
      63  up_write(&umem->mm->mmap_sem);   in usnic_uiom_reg_account()
      64  mmput(umem->mm);   in usnic_uiom_reg_account()
     131  down_write(&current->mm->mmap_sem);   in usnic_uiom_get_pages()
     133  locked = npages + current->mm->locked_vm;   in usnic_uiom_get_pages()
     147  ret = get_user_pages(current, current->mm, cur_base,   in usnic_uiom_get_pages()
     190  current->mm->locked_vm = locked;   in usnic_uiom_get_pages()
     192  up_write(&current->mm->mmap_sem);   in usnic_uiom_get_pages()
     426  struct mm_struct *mm;   in usnic_uiom_reg_release() local
     [all …]
|
/drivers/iommu/ |
D | amd_iommu_v2.c |
      50  struct mm_struct *mm; /* mm_struct for the faults */   member
      81  struct mm_struct *mm;   member
     367  mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);   in free_pasid_states()
     404  struct mm_struct *mm,   in mn_clear_flush_young() argument
     415  struct mm_struct *mm,   in mn_invalidate_page() argument
     422  struct mm_struct *mm,   in mn_invalidate_range_start() argument
     443  struct mm_struct *mm,   in mn_invalidate_range_end() argument
     458  __pa(pasid_state->mm->pgd));   in mn_invalidate_range_end()
     463  static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)   in mn_release() argument
     524  down_read(&fault->state->mm->mmap_sem);   in do_fault()
     [all …]
|
/drivers/misc/sgi-gru/ |
D | grutlbpurge.c |
     223  struct mm_struct *mm,   in gru_invalidate_range_start() argument
     237  struct mm_struct *mm, unsigned long start,   in gru_invalidate_range_end() argument
     250  static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,   in gru_invalidate_page() argument
     261  static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)   in gru_release() argument
     279  static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,   in mmu_find_ops() argument
     284  if (mm->mmu_notifier_mm) {   in mmu_find_ops()
     286  hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,   in mmu_find_ops()
     303  mn = mmu_find_ops(current->mm, &gru_mmuops);   in gru_register_mmu_notifier()
     315  err = __mmu_notifier_register(&gms->ms_notifier, current->mm);   in gru_register_mmu_notifier()
     334  mmu_notifier_unregister(&gms->ms_notifier, current->mm);   in gru_drop_mmu_notifier()
|
D | grufault.c |
      65  vma = find_vma(current->mm, vaddr);   in gru_find_vma()
      81  struct mm_struct *mm = current->mm;   in gru_find_lock_gts() local
      85  down_read(&mm->mmap_sem);   in gru_find_lock_gts()
      92  up_read(&mm->mmap_sem);   in gru_find_lock_gts()
      98  struct mm_struct *mm = current->mm;   in gru_alloc_locked_gts() local
     102  down_write(&mm->mmap_sem);   in gru_alloc_locked_gts()
     111  downgrade_write(&mm->mmap_sem);   in gru_alloc_locked_gts()
     115  up_write(&mm->mmap_sem);   in gru_alloc_locked_gts()
     125  up_read(&current->mm->mmap_sem);   in gru_unlock_gts()
     202  (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)   in non_atomic_pte_lookup()
     [all …]
|
/drivers/vfio/ |
D | vfio_iommu_type1.c |
     133  struct mm_struct *mm;   member
     142  struct mm_struct *mm;   in vfio_lock_acct_bg() local
     144  mm = vwork->mm;   in vfio_lock_acct_bg()
     145  down_write(&mm->mmap_sem);   in vfio_lock_acct_bg()
     146  mm->locked_vm += vwork->npage;   in vfio_lock_acct_bg()
     147  up_write(&mm->mmap_sem);   in vfio_lock_acct_bg()
     148  mmput(mm);   in vfio_lock_acct_bg()
     155  struct mm_struct *mm;   in vfio_lock_acct() local
     157  if (!current->mm || !npage)   in vfio_lock_acct()
     160  if (down_write_trylock(&current->mm->mmap_sem)) {   in vfio_lock_acct()
     [all …]
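vfio_lock_acct() adjusts the mm's locked_vm under mmap_sem, trying the lock first and deferring to a background worker (vfio_lock_acct_bg above) when it cannot sleep on it. A hedged sketch of the fast path only; the fallback work item and the return convention are illustrative.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Illustrative only: charge npage pages to the caller's locked_vm if the
 * mmap_sem can be taken immediately; a real implementation (as in
 * vfio_iommu_type1.c) otherwise queues the adjustment to a worker that
 * holds its own reference on the mm.
 */
static bool example_lock_acct(long npage)
{
	if (!current->mm || !npage)
		return true;	/* nothing to account */

	if (down_write_trylock(&current->mm->mmap_sem)) {
		current->mm->locked_vm += npage;
		up_write(&current->mm->mmap_sem);
		return true;
	}

	return false;	/* caller must defer the accounting */
}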
|
D | vfio_iommu_spapr_tce.c |
      59  if (!current->mm)   in tce_iommu_enable()
      83  down_write(&current->mm->mmap_sem);   in tce_iommu_enable()
      85  locked = current->mm->locked_vm + npages;   in tce_iommu_enable()
      93  current->mm->locked_vm += npages;   in tce_iommu_enable()
      96  up_write(&current->mm->mmap_sem);   in tce_iommu_enable()
     108  if (!container->tbl || !current->mm)   in tce_iommu_disable()
     111  down_write(&current->mm->mmap_sem);   in tce_iommu_disable()
     112  current->mm->locked_vm -= (container->tbl->it_size <<   in tce_iommu_disable()
     114  up_write(&current->mm->mmap_sem);   in tce_iommu_disable()
|
/drivers/infiniband/hw/cxgb4/ |
D | provider.c |
      94  struct c4iw_mm_entry *mm, *tmp;   in c4iw_dealloc_ucontext() local
      97  list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)   in c4iw_dealloc_ucontext()
      98  kfree(mm);   in c4iw_dealloc_ucontext()
     112  struct c4iw_mm_entry *mm = NULL;   in c4iw_alloc_ucontext() local
     130  mm = kmalloc(sizeof(*mm), GFP_KERNEL);   in c4iw_alloc_ucontext()
     131  if (!mm) {   in c4iw_alloc_ucontext()
     148  mm->key = uresp.status_page_key;   in c4iw_alloc_ucontext()
     149  mm->addr = virt_to_phys(rhp->rdev.status_page);   in c4iw_alloc_ucontext()
     150  mm->len = PAGE_SIZE;   in c4iw_alloc_ucontext()
     151  insert_mmap(context, mm);   in c4iw_alloc_ucontext()
     [all …]
|
/drivers/xen/ |
D | privcmd.c |
     201  struct mm_struct *mm = current->mm;   in privcmd_ioctl_mmap() local
     221  down_write(&mm->mmap_sem);   in privcmd_ioctl_mmap()
     228  vma = find_vma(mm, msg->va);   in privcmd_ioctl_mmap()
     246  up_write(&mm->mmap_sem);   in privcmd_ioctl_mmap()
     376  struct mm_struct *mm = current->mm;   in privcmd_ioctl_mmap_batch() local
     423  down_write(&mm->mmap_sem);   in privcmd_ioctl_mmap_batch()
     425  vma = find_vma(mm, m.addr);   in privcmd_ioctl_mmap_batch()
     478  up_write(&mm->mmap_sem);   in privcmd_ioctl_mmap_batch()
     499  up_write(&mm->mmap_sem);   in privcmd_ioctl_mmap_batch()
|
D | gntdev.c |
      71  struct mm_struct *mm;   member
     430  struct mm_struct *mm,   in mn_invl_range_start() argument
     447  struct mm_struct *mm,   in mn_invl_page() argument
     450  mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);   in mn_invl_page()
     454  struct mm_struct *mm)   in mn_release() argument
     504  priv->mm = get_task_mm(current);   in gntdev_open()
     505  if (!priv->mm) {   in gntdev_open()
     510  ret = mmu_notifier_register(&priv->mn, priv->mm);   in gntdev_open()
     511  mmput(priv->mm);   in gntdev_open()
     542  mmu_notifier_unregister(&priv->mn, priv->mm);   in gntdev_release()
     [all …]
|
/drivers/staging/android/ |
D | binder_alloc.c |
     195  struct mm_struct *mm = NULL;   in binder_update_page_range() local
     220  mm = alloc->vma_vm_mm;   in binder_update_page_range()
     222  if (mm) {   in binder_update_page_range()
     223  down_write(&mm->mmap_sem);   in binder_update_page_range()
     291  if (mm) {   in binder_update_page_range()
     292  up_write(&mm->mmap_sem);   in binder_update_page_range()
     293  mmput(mm);   in binder_update_page_range()
     324  if (mm) {   in binder_update_page_range()
     325  up_write(&mm->mmap_sem);   in binder_update_page_range()
     326  mmput(mm);   in binder_update_page_range()
     [all …]
|