
Searched refs:mm (Results 1 – 25 of 96) sorted by relevance


/drivers/gpu/drm/
drm_mm.c
52 static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) in drm_mm_kmalloc() argument
62 spin_lock(&mm->unused_lock); in drm_mm_kmalloc()
63 if (list_empty(&mm->unused_nodes)) in drm_mm_kmalloc()
67 list_entry(mm->unused_nodes.next, in drm_mm_kmalloc()
70 --mm->num_unused; in drm_mm_kmalloc()
72 spin_unlock(&mm->unused_lock); in drm_mm_kmalloc()
82 int drm_mm_pre_get(struct drm_mm *mm) in drm_mm_pre_get() argument
86 spin_lock(&mm->unused_lock); in drm_mm_pre_get()
87 while (mm->num_unused < MM_UNUSED_TARGET) { in drm_mm_pre_get()
88 spin_unlock(&mm->unused_lock); in drm_mm_pre_get()
[all …]
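
The drm_mm_kmalloc()/drm_mm_pre_get() fragments above split node allocation from node use: drm_mm_pre_get() refills a per-allocator cache of free nodes while sleeping is still allowed, and the atomic path later pulls from that cache under mm->unused_lock. A minimal caller-side sketch of that split, assuming this era's drm_mm_search_free()/drm_mm_get_block_atomic() helpers (the wrapper function and its lock are illustrative, not from the listing):

    /* needs <drm/drm_mm.h>; 'alloc_range_atomically' and 'lock' are hypothetical */
    static struct drm_mm_node *alloc_range_atomically(struct drm_mm *mm,
                                                      spinlock_t *lock,
                                                      unsigned long size,
                                                      unsigned alignment)
    {
        struct drm_mm_node *node = NULL;

        if (drm_mm_pre_get(mm))         /* may sleep: tops up mm->unused_nodes */
            return NULL;

        spin_lock(lock);                /* no sleeping allowed from here on */
        node = drm_mm_search_free(mm, size, alignment, false);
        if (node)
            node = drm_mm_get_block_atomic(node, size, alignment);
        spin_unlock(lock);

        return node;
    }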
drm_gem.c
93 struct drm_gem_mm *mm; in drm_gem_init() local
98 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); in drm_gem_init()
99 if (!mm) { in drm_gem_init()
104 dev->mm_private = mm; in drm_gem_init()
106 if (drm_ht_create(&mm->offset_hash, 12)) { in drm_gem_init()
107 kfree(mm); in drm_gem_init()
111 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START, in drm_gem_init()
113 drm_ht_remove(&mm->offset_hash); in drm_gem_init()
114 kfree(mm); in drm_gem_init()
124 struct drm_gem_mm *mm = dev->mm_private; in drm_gem_destroy() local
[all …]
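
drm_gem_init() above is a standard unwind-on-error sequence: each resource set up successfully is torn down in reverse order when a later step fails. A condensed sketch of that idiom; the hash order and constants follow the listing, the size argument to drm_mm_init() is elided there, so the one below is a placeholder:

    /* 'gem_mm_setup' is a hypothetical rewrite of the unwind pattern shown above */
    static int gem_mm_setup(struct drm_device *dev)
    {
        struct drm_gem_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm)
            return -ENOMEM;
        dev->mm_private = mm;

        if (drm_ht_create(&mm->offset_hash, 12))        /* 2^12-bucket hash, as above */
            goto err_free;

        if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
                        DRM_FILE_PAGE_OFFSET_SIZE))     /* size placeholder, elided in listing */
            goto err_ht;

        return 0;

    err_ht:
        drm_ht_remove(&mm->offset_hash);
    err_free:
        dev->mm_private = NULL;
        kfree(mm);
        return -ENOMEM;
    }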
/drivers/gpu/drm/nouveau/core/core/
mm.c
28 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
32 nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis) in nouveau_mm_free() argument
56 list_for_each_entry(prev, &mm->free, fl_entry) { in nouveau_mm_free()
70 region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) in region_head() argument
93 nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, in nouveau_mm_head() argument
101 list_for_each_entry(this, &mm->free, fl_entry) { in nouveau_mm_head()
107 s = roundup(s, mm->block_size); in nouveau_mm_head()
111 e = rounddown(e, mm->block_size); in nouveau_mm_head()
119 if (splitoff && !region_head(mm, this, splitoff)) in nouveau_mm_head()
122 this = region_head(mm, this, min(size_max, e - s)); in nouveau_mm_head()
[all …]
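
The roundup()/rounddown() pair in nouveau_mm_head() above is the allocator's fit test: the usable part of a free region is what remains after aligning its start up and its end down to mm->block_size, and anything trimmed from the head is split back onto the free list. A small arithmetic sketch of that test (all names are illustrative):

    /* needs <linux/kernel.h> for roundup/rounddown/min */
    static u32 usable_after_alignment(u32 region_start, u32 region_end,
                                      u32 block_size, u32 size_min, u32 size_max)
    {
        u32 s = roundup(region_start, block_size);    /* first aligned byte */
        u32 e = rounddown(region_end, block_size);    /* end, aligned down  */
        u32 splitoff = s - region_start;              /* head fragment to give back */

        if (e <= s || e - s < size_min)
            return 0;                                 /* region cannot satisfy the request */
        (void)splitoff;                               /* real code splits this off first */
        return min(size_max, e - s);                  /* how much to actually take */
    }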
/drivers/gpio/
gpio-mpc8xxx.c
50 to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) in to_mpc8xxx_gpio_chip() argument
52 return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); in to_mpc8xxx_gpio_chip()
55 static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) in mpc8xxx_gpio_save_regs() argument
57 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); in mpc8xxx_gpio_save_regs()
59 mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); in mpc8xxx_gpio_save_regs()
70 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); in mpc8572_gpio_get() local
71 struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); in mpc8572_gpio_get()
73 val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); in mpc8572_gpio_get()
80 struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); in mpc8xxx_gpio_get() local
82 return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); in mpc8xxx_gpio_get()
[all …]
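
to_mpc8xxx_gpio_chip() above is the usual container_of() idiom: the generic of_mm_gpio_chip sits embedded inside a driver-private structure, and a pointer to the embedded member is converted back into a pointer to the wrapper. A generic sketch with hypothetical names:

    /* needs <linux/kernel.h> and <linux/of_gpio.h>; 'my_gpio_chip' is illustrative */
    struct my_gpio_chip {
        struct of_mm_gpio_chip mm_gc;   /* embedded memory-mapped GPIO chip */
        u32 saved_data;                 /* driver-private shadow register   */
    };

    static inline struct my_gpio_chip *to_my_gpio_chip(struct of_mm_gpio_chip *mm)
    {
        return container_of(mm, struct my_gpio_chip, mm_gc);
    }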
/drivers/oprofile/
buffer_sync.c
88 struct mm_struct *mm = current->mm; in munmap_notify() local
91 down_read(&mm->mmap_sem); in munmap_notify()
93 mpnt = find_vma(mm, addr); in munmap_notify()
95 up_read(&mm->mmap_sem); in munmap_notify()
103 up_read(&mm->mmap_sem); in munmap_notify()
224 static unsigned long get_exec_dcookie(struct mm_struct *mm) in get_exec_dcookie() argument
228 if (mm && mm->exe_file) in get_exec_dcookie()
229 cookie = fast_get_dcookie(&mm->exe_file->f_path); in get_exec_dcookie()
241 lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset) in lookup_dcookie() argument
246 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { in lookup_dcookie()
[all …]
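
munmap_notify() and lookup_dcookie() above follow the locking rule of this era for VMA lookups: take mm->mmap_sem for reading around find_vma() and around any walk of the vma list. A minimal sketch of a lookup done that way (the helper name is hypothetical):

    /* needs <linux/mm.h>; checks whether 'addr' lies inside some mapping of 'mm' */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
        struct vm_area_struct *vma;
        bool mapped;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);               /* first vma with vm_end > addr */
        mapped = vma && vma->vm_start <= addr;  /* reject a vma that starts above addr */
        up_read(&mm->mmap_sem);

        return mapped;
    }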
/drivers/infiniband/hw/ipath/
ipath_user_pages.c
74 ret = get_user_pages(current, current->mm, in __ipath_get_user_pages()
82 current->mm->pinned_vm += num_pages; in __ipath_get_user_pages()
166 down_write(&current->mm->mmap_sem); in ipath_get_user_pages()
170 up_write(&current->mm->mmap_sem); in ipath_get_user_pages()
177 down_write(&current->mm->mmap_sem); in ipath_release_user_pages()
181 current->mm->pinned_vm -= num_pages; in ipath_release_user_pages()
183 up_write(&current->mm->mmap_sem); in ipath_release_user_pages()
188 struct mm_struct *mm; member
197 down_write(&work->mm->mmap_sem); in user_pages_account()
198 work->mm->pinned_vm -= work->num_pages; in user_pages_account()
[all …]
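
__ipath_get_user_pages() above (and umem.c and qib_user_pages.c further down this page) shows the page-pinning recipe of this kernel generation: call the eight-argument get_user_pages() and account the pinned pages in mm->pinned_vm while holding mmap_sem for writing. A condensed sketch; the helper name and error handling are illustrative:

    /* needs <linux/mm.h>; pins 'num_pages' writable pages starting at 'start' */
    static int pin_user_buffer(unsigned long start, int num_pages, struct page **pages)
    {
        int got;

        down_write(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, start, num_pages,
                             1 /* write */, 0 /* force */, pages, NULL);
        if (got == num_pages)
            current->mm->pinned_vm += num_pages;
        up_write(&current->mm->mmap_sem);

        if (got == num_pages)
            return 0;
        /* real code must release any partially pinned pages here */
        return got < 0 ? got : -EFAULT;
    }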
/drivers/infiniband/core/
umem.c
138 down_write(&current->mm->mmap_sem); in ib_umem_get()
140 locked = npages + current->mm->pinned_vm; in ib_umem_get()
152 ret = get_user_pages(current, current->mm, cur_base, in ib_umem_get()
210 current->mm->pinned_vm = locked; in ib_umem_get()
212 up_write(&current->mm->mmap_sem); in ib_umem_get()
225 down_write(&umem->mm->mmap_sem); in ib_umem_account()
226 umem->mm->pinned_vm -= umem->diff; in ib_umem_account()
227 up_write(&umem->mm->mmap_sem); in ib_umem_account()
228 mmput(umem->mm); in ib_umem_account()
239 struct mm_struct *mm; in ib_umem_release() local
[all …]
/drivers/gpu/drm/i915/
i915_gem_stolen.c
81 base -= dev_priv->mm.gtt->stolen_size; in i915_stolen_to_physical()
97 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, in i915_setup_compression()
100 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, in i915_setup_compression()
112 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, in i915_setup_compression()
123 dev_priv->mm.stolen_base + compressed_fb->start); in i915_setup_compression()
125 dev_priv->mm.stolen_base + compressed_llb->start); in i915_setup_compression()
146 if (dev_priv->mm.stolen_base == 0) in i915_gem_stolen_setup_compression()
179 drm_mm_takedown(&dev_priv->mm.stolen); in i915_gem_cleanup_stolen()
186 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); in i915_gem_init_stolen()
187 if (dev_priv->mm.stolen_base == 0) in i915_gem_init_stolen()
[all …]
i915_gem_evict.c
81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, in i915_gem_evict_something()
85 drm_mm_init_scan(&dev_priv->mm.gtt_space, in i915_gem_evict_something()
89 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { in i915_gem_evict_something()
98 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { in i915_gem_evict_something()
161 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && in i915_gem_evict_everything()
162 list_empty(&dev_priv->mm.active_list)); in i915_gem_evict_everything()
180 &dev_priv->mm.inactive_list, mm_list) in i915_gem_evict_everything()
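
i915_gem_evict_something() above drives the drm_mm scan API: initialise a scan for the requested size and alignment, feed nodes from the inactive (then active) LRU into it until the scanner reports that a suitable hole can be created, then unwind and evict only the objects the scan selected. A condensed sketch of the forward half of that loop; the object and list fields follow the listing, the function itself is a hypothetical condensation:

    /* types follow i915 of this era; every scanned block must later be handed
     * back with drm_mm_scan_remove_block() before the manager is used again */
    static bool scan_inactive_for_hole(struct drm_i915_private *dev_priv,
                                       unsigned long min_size, unsigned alignment,
                                       unsigned long cache_level)
    {
        struct drm_i915_gem_object *obj;

        drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment, cache_level);

        list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
            if (drm_mm_scan_add_block(obj->gtt_space))
                return true;    /* evicting the scanned set opens a hole */
        }
        return false;
    }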
i915_gem.c
78 dev_priv->mm.object_count++; in i915_gem_info_add_obj()
79 dev_priv->mm.object_memory += size; in i915_gem_info_add_obj()
85 dev_priv->mm.object_count--; in i915_gem_info_remove_obj()
86 dev_priv->mm.object_memory -= size; in i915_gem_info_remove_obj()
179 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) in i915_gem_get_aperture_ioctl()
1071 bool interruptible = dev_priv->mm.interruptible; in i915_wait_seqno()
1139 BUG_ON(!dev_priv->mm.interruptible); in i915_gem_object_wait_rendering__nonblocking()
1492 dev_priv->mm.shrinker_no_lock_stealing = true; in i915_gem_object_create_mmap_offset()
1513 dev_priv->mm.shrinker_no_lock_stealing = false; in i915_gem_object_create_mmap_offset()
1698 &dev_priv->mm.unbound_list, in __i915_gem_shrink()
[all …]
i915_gem_gtt.c
83 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; in gen6_ppgtt_enable()
318 dev_priv->mm.aliasing_ppgtt = ppgtt; in i915_gem_init_aliasing_ppgtt()
326 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; in i915_gem_cleanup_aliasing_ppgtt()
332 dev_priv->mm.aliasing_ppgtt = NULL; in i915_gem_cleanup_aliasing_ppgtt()
370 bool ret = dev_priv->mm.interruptible; in do_idling()
373 dev_priv->mm.interruptible = false; in do_idling()
387 dev_priv->mm.interruptible = interruptible; in undo_idling()
399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { in i915_gem_restore_gtt_mappings()
584 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); in i915_gem_setup_global_gtt()
586 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; in i915_gem_setup_global_gtt()
[all …]
/drivers/gpu/drm/ttm/
ttm_bo_manager.c
46 struct drm_mm mm; member
56 struct drm_mm *mm = &rman->mm; in ttm_bo_man_get_node() local
65 ret = drm_mm_pre_get(mm); in ttm_bo_man_get_node()
70 node = drm_mm_search_free_in_range(mm, in ttm_bo_man_get_node()
112 ret = drm_mm_init(&rman->mm, 0, p_size); in ttm_bo_man_init()
126 struct drm_mm *mm = &rman->mm; in ttm_bo_man_takedown() local
129 if (drm_mm_clean(mm)) { in ttm_bo_man_takedown()
130 drm_mm_takedown(mm); in ttm_bo_man_takedown()
146 drm_mm_debug_table(&rman->mm, prefix); in ttm_bo_man_debug()
/drivers/infiniband/hw/qib/
qib_user_pages.c
69 ret = get_user_pages(current, current->mm, in __qib_get_user_pages()
77 current->mm->pinned_vm += num_pages; in __qib_get_user_pages()
137 down_write(&current->mm->mmap_sem); in qib_get_user_pages()
141 up_write(&current->mm->mmap_sem); in qib_get_user_pages()
148 if (current->mm) /* during close after signal, mm can be NULL */ in qib_release_user_pages()
149 down_write(&current->mm->mmap_sem); in qib_release_user_pages()
153 if (current->mm) { in qib_release_user_pages()
154 current->mm->pinned_vm -= num_pages; in qib_release_user_pages()
155 up_write(&current->mm->mmap_sem); in qib_release_user_pages()
/drivers/infiniband/hw/cxgb3/
iwch_provider.h
209 struct iwch_mm_entry *mm; in remove_mmap() local
214 mm = list_entry(pos, struct iwch_mm_entry, entry); in remove_mmap()
215 if (mm->key == key && mm->len == len) { in remove_mmap()
216 list_del_init(&mm->entry); in remove_mmap()
219 key, (unsigned long long) mm->addr, mm->len); in remove_mmap()
220 return mm; in remove_mmap()
228 struct iwch_mm_entry *mm) in insert_mmap() argument
232 mm->key, (unsigned long long) mm->addr, mm->len); in insert_mmap()
233 list_add_tail(&mm->entry, &ucontext->mmaps); in insert_mmap()
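
remove_mmap()/insert_mmap() above (and their cxgb4 twins at the end of this page) keep pending mmap cookies on a per-context list keyed by (key, length); remove_mmap() walks the list, unlinks the first match and returns it. A generic sketch of that lookup with hypothetical names:

    /* needs <linux/list.h>; generic keyed lookup-and-unlink */
    struct mmap_entry {
        struct list_head entry;
        u32 key;
        unsigned len;
        u64 addr;
    };

    static struct mmap_entry *find_and_remove(struct list_head *mmaps,
                                              u32 key, unsigned len)
    {
        struct mmap_entry *mm;

        list_for_each_entry(mm, mmaps, entry) {
            if (mm->key == key && mm->len == len) {
                list_del_init(&mm->entry);
                return mm;      /* caller now owns the entry */
            }
        }
        return NULL;
    }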
/drivers/gpu/drm/nouveau/core/subdev/vm/
base.c
239 mutex_unlock(&vm->mm.mutex); in nouveau_vm_unmap_pgt()
241 mutex_lock(&vm->mm.mutex); in nouveau_vm_unmap_pgt()
259 mutex_unlock(&vm->mm.mutex); in nouveau_vm_map_pgt()
262 mutex_lock(&vm->mm.mutex); in nouveau_vm_map_pgt()
268 mutex_unlock(&vm->mm.mutex); in nouveau_vm_map_pgt()
270 mutex_lock(&vm->mm.mutex); in nouveau_vm_map_pgt()
292 mutex_lock(&vm->mm.mutex); in nouveau_vm_get()
293 ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align, in nouveau_vm_get()
296 mutex_unlock(&vm->mm.mutex); in nouveau_vm_get()
316 nouveau_mm_free(&vm->mm, &vma->node); in nouveau_vm_get()
[all …]
/drivers/vfio/
vfio_iommu_type1.c
79 struct mm_struct *mm; member
88 struct mm_struct *mm; in vfio_lock_acct_bg() local
90 mm = vwork->mm; in vfio_lock_acct_bg()
91 down_write(&mm->mmap_sem); in vfio_lock_acct_bg()
92 mm->locked_vm += vwork->npage; in vfio_lock_acct_bg()
93 up_write(&mm->mmap_sem); in vfio_lock_acct_bg()
94 mmput(mm); in vfio_lock_acct_bg()
101 struct mm_struct *mm; in vfio_lock_acct() local
103 if (!current->mm) in vfio_lock_acct()
106 if (down_write_trylock(&current->mm->mmap_sem)) { in vfio_lock_acct()
[all …]
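
vfio_lock_acct() and vfio_lock_acct_bg() above form a trylock-or-defer pair: the locked_vm counter is adjusted directly when mmap_sem can be taken without blocking in the caller's context, and otherwise the update is pushed to a work item that holds its own mm reference. A condensed sketch; the work struct and helper names are illustrative:

    /* needs <linux/mm.h>, <linux/sched.h>, <linux/workqueue.h>, <linux/slab.h> */
    struct acct_work {
        struct work_struct work;
        struct mm_struct *mm;
        long npage;
    };

    static void acct_bg(struct work_struct *work)
    {
        struct acct_work *vwork = container_of(work, struct acct_work, work);

        down_write(&vwork->mm->mmap_sem);
        vwork->mm->locked_vm += vwork->npage;
        up_write(&vwork->mm->mmap_sem);
        mmput(vwork->mm);               /* drop the reference taken at submit time */
        kfree(vwork);
    }

    static void acct_locked_vm(long npage)
    {
        struct acct_work *vwork;
        struct mm_struct *mm;

        if (!current->mm)
            return;                     /* process is exiting */

        if (down_write_trylock(&current->mm->mmap_sem)) {
            current->mm->locked_vm += npage;
            up_write(&current->mm->mmap_sem);
            return;
        }

        /* couldn't take the lock here: defer the update to process context */
        vwork = kmalloc(sizeof(*vwork), GFP_KERNEL);
        if (!vwork)
            return;
        mm = get_task_mm(current);
        if (!mm) {
            kfree(vwork);
            return;
        }
        INIT_WORK(&vwork->work, acct_bg);
        vwork->mm = mm;
        vwork->npage = npage;
        schedule_work(&vwork->work);
    }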
/drivers/misc/sgi-gru/
grutlbpurge.c
223 struct mm_struct *mm, in gru_invalidate_range_start() argument
237 struct mm_struct *mm, unsigned long start, in gru_invalidate_range_end() argument
250 static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, in gru_invalidate_page() argument
261 static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) in gru_release() argument
279 static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm, in mmu_find_ops() argument
284 if (mm->mmu_notifier_mm) { in mmu_find_ops()
286 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, in mmu_find_ops()
303 mn = mmu_find_ops(current->mm, &gru_mmuops); in gru_register_mmu_notifier()
315 err = __mmu_notifier_register(&gms->ms_notifier, current->mm); in gru_register_mmu_notifier()
334 mmu_notifier_unregister(&gms->ms_notifier, current->mm); in gru_drop_mmu_notifier()
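
gru_register_mmu_notifier() above attaches the driver to the calling task's address space so that the invalidate/release callbacks can drop stale driver-side translations. A minimal registration sketch using this era's mmu_notifier API; the callback bodies and names are illustrative:

    /* needs <linux/mmu_notifier.h> */
    static void my_invalidate_range_start(struct mmu_notifier *mn,
                                          struct mm_struct *mm,
                                          unsigned long start, unsigned long end)
    {
        /* flush any driver-held translations covering [start, end) */
    }

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
        /* address space is being torn down: drop all driver state for it */
    }

    static const struct mmu_notifier_ops my_mmuops = {
        .invalidate_range_start = my_invalidate_range_start,
        .release                = my_release,
    };

    static int my_attach(struct mmu_notifier *mn)
    {
        mn->ops = &my_mmuops;
        return mmu_notifier_register(mn, current->mm);
    }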
grufault.c
65 vma = find_vma(current->mm, vaddr); in gru_find_vma()
81 struct mm_struct *mm = current->mm; in gru_find_lock_gts() local
85 down_read(&mm->mmap_sem); in gru_find_lock_gts()
92 up_read(&mm->mmap_sem); in gru_find_lock_gts()
98 struct mm_struct *mm = current->mm; in gru_alloc_locked_gts() local
102 down_write(&mm->mmap_sem); in gru_alloc_locked_gts()
111 downgrade_write(&mm->mmap_sem); in gru_alloc_locked_gts()
115 up_write(&mm->mmap_sem); in gru_alloc_locked_gts()
125 up_read(&current->mm->mmap_sem); in gru_unlock_gts()
202 (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0) in non_atomic_pte_lookup()
[all …]
/drivers/staging/zcache/
TODO
11 4. zcache side of cleancache/mm WasActive patch
18 9. get core mm developer to review
20 11. get review/acks from 1-2 additional mm developers
21 12. incorporate any feedback from additional mm reviews
22 13. propose location/file-naming in mm tree
33 staging/zcache for 3.9; dependent on proposed mm patch, see
40 and proposed mm patches with new version in staging/zcache
55 track. Akpm clearly has to approve for any mm merge to happen. Minchan
56 Kim has interest but may be happy if/when zram is merged into mm. Konrad
58 separately from the rest of mm. (More LSF/MM 2013 discussion.)
/drivers/xen/
privcmd.c
196 struct mm_struct *mm = current->mm; in privcmd_ioctl_mmap() local
216 down_write(&mm->mmap_sem); in privcmd_ioctl_mmap()
223 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap()
241 up_write(&mm->mmap_sem); in privcmd_ioctl_mmap()
371 struct mm_struct *mm = current->mm; in privcmd_ioctl_mmap_batch() local
418 down_write(&mm->mmap_sem); in privcmd_ioctl_mmap_batch()
420 vma = find_vma(mm, m.addr); in privcmd_ioctl_mmap_batch()
426 up_write(&mm->mmap_sem); in privcmd_ioctl_mmap_batch()
433 up_write(&mm->mmap_sem); in privcmd_ioctl_mmap_batch()
449 up_write(&mm->mmap_sem); in privcmd_ioctl_mmap_batch()
gntdev.c
69 struct mm_struct *mm; member
435 struct mm_struct *mm, in mn_invl_range_start() argument
452 struct mm_struct *mm, in mn_invl_page() argument
455 mn_invl_range_start(mn, mm, address, address + PAGE_SIZE); in mn_invl_page()
459 struct mm_struct *mm) in mn_release() argument
509 priv->mm = get_task_mm(current); in gntdev_open()
510 if (!priv->mm) { in gntdev_open()
515 ret = mmu_notifier_register(&priv->mn, priv->mm); in gntdev_open()
516 mmput(priv->mm); in gntdev_open()
545 mmu_notifier_unregister(&priv->mn, priv->mm); in gntdev_release()
[all …]
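
gntdev_open() above shows the mm reference discipline around registration: get_task_mm() pins the mm just long enough for mmu_notifier_register() to attach, the temporary reference is then dropped with mmput(), and the .release callback later signals teardown. A condensed sketch; the struct layout and the error code are illustrative:

    /* needs <linux/mmu_notifier.h> and <linux/sched.h> */
    struct my_priv {
        struct mmu_notifier mn;
        struct mm_struct *mm;
    };

    static int my_open_attach(struct my_priv *priv)
    {
        int ret;

        priv->mm = get_task_mm(current);
        if (!priv->mm)
            return -ESRCH;              /* no usable mm (e.g. a kernel thread) */

        ret = mmu_notifier_register(&priv->mn, priv->mm);
        mmput(priv->mm);                /* drop our temporary reference */
        return ret;
    }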
/drivers/iommu/
amd_iommu_v2.c
49 struct mm_struct *mm; /* mm_struct for the faults */ member
75 struct mm_struct *mm; member
311 mmput(pasid_state->mm); in put_pasid_state_wait()
327 mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); in __unbind_pasid()
413 struct mm_struct *mm, in mn_clear_flush_young() argument
422 struct mm_struct *mm, in mn_change_pte() argument
430 struct mm_struct *mm, in mn_invalidate_page() argument
437 struct mm_struct *mm, in mn_invalidate_range_start() argument
451 struct mm_struct *mm, in mn_invalidate_range_end() argument
461 __pa(pasid_state->mm->pgd)); in mn_invalidate_range_end()
[all …]
/drivers/media/platform/omap3isp/
ispqueue.c
112 if (!current || !current->mm) in isp_video_buffer_lock_vma()
118 down_write(&current->mm->mmap_sem); in isp_video_buffer_lock_vma()
119 spin_lock(&current->mm->page_table_lock); in isp_video_buffer_lock_vma()
122 vma = find_vma(current->mm, start); in isp_video_buffer_lock_vma()
142 spin_unlock(&current->mm->page_table_lock); in isp_video_buffer_lock_vma()
143 up_write(&current->mm->mmap_sem); in isp_video_buffer_lock_vma()
334 down_read(&current->mm->mmap_sem); in isp_video_buffer_prepare_user()
335 ret = get_user_pages(current, current->mm, data & PAGE_MASK, in isp_video_buffer_prepare_user()
339 up_read(&current->mm->mmap_sem); in isp_video_buffer_prepare_user()
379 down_read(&current->mm->mmap_sem); in isp_video_buffer_prepare_pfnmap()
[all …]
/drivers/pcmcia/
rsrc_nonstatic.c
487 struct resource_map *m, mm; in validate_mem() local
503 for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { in validate_mem()
504 mm = *m; in validate_mem()
506 if (mm.base >= 0x100000) in validate_mem()
508 if ((mm.base | mm.num) & 0xffff) { in validate_mem()
509 ok += do_mem_probe(s, mm.base, mm.num, readable, in validate_mem()
516 if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) { in validate_mem()
543 struct resource_map *m, mm; in validate_mem() local
547 for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { in validate_mem()
548 mm = *m; in validate_mem()
[all …]
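
validate_mem() above copies each resource_map node to the stack (mm = *m) before probing it and advances the loop through the copy's next pointer, presumably because do_mem_probe() can rewrite the mem_db list while it runs. A stripped-down sketch of that walk with hypothetical names:

    /* circular list walked via a stack copy of each node */
    struct map_node {
        struct map_node *next;
        unsigned long base, num;
    };

    static void probe_window(unsigned long base, unsigned long num)
    {
        /* hypothetical prober; may add or remove map_node entries */
    }

    static void probe_all(struct map_node *head)
    {
        struct map_node *m, copy;

        for (m = head->next; m != head; m = copy.next) {
            copy = *m;                  /* snapshot before the list can change */
            probe_window(copy.base, copy.num);
        }
    }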
/drivers/infiniband/hw/cxgb4/
iw_cxgb4.h
475 struct c4iw_mm_entry *mm; in remove_mmap() local
480 mm = list_entry(pos, struct c4iw_mm_entry, entry); in remove_mmap()
481 if (mm->key == key && mm->len == len) { in remove_mmap()
482 list_del_init(&mm->entry); in remove_mmap()
485 key, (unsigned long long) mm->addr, mm->len); in remove_mmap()
486 return mm; in remove_mmap()
494 struct c4iw_mm_entry *mm) in insert_mmap() argument
498 mm->key, (unsigned long long) mm->addr, mm->len); in insert_mmap()
499 list_add_tail(&mm->entry, &ucontext->mmaps); in insert_mmap()
