/mm/kasan/ |
D | quarantine.c |
    65 static void qlist_move_all(struct qlist_head *from, struct qlist_head *to) in qlist_move_all() argument
    67 if (unlikely(qlist_empty(from))) in qlist_move_all()
    71 *to = *from; in qlist_move_all()
    72 qlist_init(from); in qlist_move_all()
    76 to->tail->next = from->head; in qlist_move_all()
    77 to->tail = from->tail; in qlist_move_all()
    78 to->bytes += from->bytes; in qlist_move_all()
    80 qlist_init(from); in qlist_move_all()
    276 static void qlist_move_cache(struct qlist_head *from, in qlist_move_cache() argument
    282 if (unlikely(qlist_empty(from))) in qlist_move_cache()
    [all …]
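The qlist_move_all() fragments above splice one singly linked quarantine list onto the tail of another and then reinitialize the source. A minimal userspace sketch of that pattern, with hypothetical qlist types rather than the KASAN ones:

    #include <stddef.h>

    struct qlist_node { struct qlist_node *next; };
    struct qlist_head { struct qlist_node *head, *tail; size_t bytes; };

    static int qlist_empty(const struct qlist_head *q) { return !q->head; }
    static void qlist_init(struct qlist_head *q) { q->head = q->tail = NULL; q->bytes = 0; }

    /* Move every node from 'from' onto the tail of 'to'; 'from' ends up empty. */
    static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
    {
        if (qlist_empty(from))
            return;
        if (qlist_empty(to)) {
            *to = *from;                 /* take over head, tail and byte count */
        } else {
            to->tail->next = from->head; /* chain the source onto the tail */
            to->tail = from->tail;
            to->bytes += from->bytes;
        }
        qlist_init(from);
    }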
|
/mm/ |
D | memcontrol.c |
    185 struct mem_cgroup *from; member
    1265 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, in __invalidate_reclaim_iterators() argument
    1273 mz = mem_cgroup_nodeinfo(from, nid); in __invalidate_reclaim_iterators()
    1488 struct mem_cgroup *from; in mem_cgroup_under_move() local
    1496 from = mc.from; in mem_cgroup_under_move()
    1498 if (!from) in mem_cgroup_under_move()
    1501 ret = mem_cgroup_is_descendant(from, memcg) || in mem_cgroup_under_move()
    3349 struct mem_cgroup *from, struct mem_cgroup *to) in mem_cgroup_move_swap_account() argument
    3353 old_id = mem_cgroup_id(from); in mem_cgroup_move_swap_account()
    3357 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
    [all …]
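mem_cgroup_under_move() above decides whether a cgroup is affected by an in-flight charge move by testing whether it descends from the move's source or destination. The ancestry test itself is just a walk up the parent chain; a hedged sketch with a made-up node type, not the memcg API:

    struct cg_node { struct cg_node *parent; };

    /* Nonzero if 'node' is 'root' itself or one of its descendants. */
    static int is_descendant(const struct cg_node *node, const struct cg_node *root)
    {
        for (; node; node = node->parent)
            if (node == root)
                return 1;
        return 0;
    }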
|
D | hugetlb.c |
    252 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) in get_file_region_entry_from_cache() argument
    262 nrg->from = from; in get_file_region_entry_from_cache()
    340 if (&prg->link != &resv->regions && prg->to == rg->from && in coalesce_file_region()
    352 if (&nrg->link != &resv->regions && nrg->from == rg->to && in coalesce_file_region()
    354 nrg->from = rg->from; in coalesce_file_region()
    388 if (rg->from < f) { in add_reservation_in_range()
    400 if (rg->from > t) in add_reservation_in_range()
    406 if (rg->from > last_accounted_offset) { in add_reservation_in_range()
    407 add += rg->from - last_accounted_offset; in add_reservation_in_range()
    410 resv, last_accounted_offset, rg->from); in add_reservation_in_range()
    [all …]
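coalesce_file_region() above merges a reservation entry with its neighbours whenever their [from, to) ranges are exactly adjacent. The same idea on a plain sorted array of ranges, with hypothetical types rather than the resv_map machinery:

    #include <stddef.h>

    struct file_region { long from, to; };

    /*
     * Merge neighbouring entries of a sorted, non-overlapping array whenever
     * one range ends exactly where the next begins.  Returns the new count.
     */
    static size_t coalesce_regions(struct file_region *rg, size_t n)
    {
        size_t out = 0;
        for (size_t i = 0; i < n; i++) {
            if (out && rg[out - 1].to == rg[i].from)
                rg[out - 1].to = rg[i].to;   /* absorb the adjacent range */
            else
                rg[out++] = rg[i];
        }
        return out;
    }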
|
D | nommu.c |
    509 static void free_page_series(unsigned long from, unsigned long to) in free_page_series() argument
    511 for (; from < to; from += PAGE_SIZE) { in free_page_series()
    512 struct page *page = virt_to_page(from); in free_page_series()
    1419 unsigned long from, unsigned long to) in shrink_vma() argument
    1426 if (from > vma->vm_start) in shrink_vma()
    1427 vma->vm_end = from; in shrink_vma()
    1438 if (from > region->vm_start) { in shrink_vma()
    1440 region->vm_top = region->vm_end = from; in shrink_vma()
    1447 free_page_series(from, to); in shrink_vma()
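free_page_series() above simply walks the virtual range one page at a time and releases each page; shrink_vma() trims a mapping down to 'from' and hands the cut-off tail to it. A compressed sketch of the walk, with a stand-in release function instead of the kernel's page helpers:

    #define PAGE_SIZE 4096UL

    static void release_one_page(unsigned long addr) { (void)addr; /* stand-in for put_page() */ }

    /* Release every page in [from, to); both ends assumed page aligned. */
    static void free_page_series(unsigned long from, unsigned long to)
    {
        for (; from < to; from += PAGE_SIZE)
            release_one_page(from);
    }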
|
D | Kconfig |
    186 # Heavily threaded applications may benefit from splitting the mm-wide
    191 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
    252 free pages from the buddy allocator for the purpose of reporting
    322 int "Low address space to protect from user allocation"
    327 from userspace allocation. Keeping a user from writing to low pages
    346 bool "Enable recovery from hardware memory errors"
    350 Enables code to recover from some memory failures on systems
    495 be allocated from it. This way, the kernel can use the memory for
    521 from CMA.
    556 in the case where decompressing from RAM is faster that swap device
    [all …]
|
D | filemap.c |
    3367 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) in generic_file_direct_write() argument
    3377 write_len = iov_iter_count(from); in generic_file_direct_write()
    3410 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
    3442 iov_iter_revert(from, write_len - iov_iter_count(from)); in generic_file_direct_write()
    3575 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) in __generic_file_write_iter() argument
    3597 written = generic_file_direct_write(iocb, from); in __generic_file_write_iter()
    3605 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) in __generic_file_write_iter()
    3608 status = generic_perform_write(file, from, pos = iocb->ki_pos); in __generic_file_write_iter()
    3640 written = generic_perform_write(file, from, iocb->ki_pos); in __generic_file_write_iter()
    3663 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) in generic_file_write_iter() argument
    [all …]
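__generic_file_write_iter() above tries the direct-I/O path first and, if that consumed only part of the iterator, falls back to the buffered generic_perform_write() for the remainder. A hedged userspace sketch of that control flow; the "direct" helper is a toy that only handles an aligned prefix, not the kernel's address-space operations:

    #define _GNU_SOURCE
    #include <unistd.h>
    #include <sys/types.h>

    /* Toy "direct" path: pretend it can only handle a 4096-byte aligned prefix. */
    static ssize_t direct_write(int fd, const char *buf, size_t len, off_t pos)
    {
        size_t aligned = len & ~(size_t)4095;
        return aligned ? pwrite(fd, buf, aligned, pos) : 0;
    }

    /* Direct path first, then a buffered fallback for whatever is left over. */
    static ssize_t write_prefer_direct(int fd, const char *buf, size_t len, off_t pos)
    {
        ssize_t done = direct_write(fd, buf, len, pos);
        if (done < 0)
            return done;                         /* hard error: give up */
        if ((size_t)done == len)
            return done;                         /* direct path wrote everything */

        ssize_t more = pwrite(fd, buf + done, len - done, pos + done);
        return more < 0 ? (done ? done : more) : done + more;
    }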
|
D | page_io.c |
    272 struct iov_iter from; in __swap_writepage() local
    274 iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE); in __swap_writepage()
    280 ret = mapping->a_ops->direct_IO(&kiocb, &from); in __swap_writepage()
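__swap_writepage() above wraps the page in a one-segment bio_vec iterator and hands it to ->direct_IO(). The closest userspace analogue is building a single struct iovec over a page-sized buffer and submitting it with pwritev(); a small sketch:

    #define _GNU_SOURCE
    #include <sys/uio.h>
    #include <unistd.h>

    #define PAGE_SIZE 4096

    /* Write one page-sized buffer at 'off' through a single-segment iovec. */
    static ssize_t write_one_page(int fd, void *page_buf, off_t off)
    {
        struct iovec iov = { .iov_base = page_buf, .iov_len = PAGE_SIZE };
        return pwritev(fd, &iov, 1, off);
    }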
|
D | truncate.c |
    882 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) in pagecache_isize_extended() argument
    891 if (from >= to || bsize == PAGE_SIZE) in pagecache_isize_extended()
    894 rounded_from = round_up(from, bsize); in pagecache_isize_extended()
    898 index = from >> PAGE_SHIFT; in pagecache_isize_extended()
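pagecache_isize_extended() above only cares about the block that straddled the old end of file: it bails out early when the size did not grow or the block size equals the page size, and otherwise works on the page containing the old size 'from'. The arithmetic in isolation, with made-up example values:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static unsigned long round_up_to(unsigned long x, unsigned long a)
    {
        return (x + a - 1) & ~(a - 1);      /* 'a' must be a power of two */
    }

    int main(void)
    {
        unsigned long from = 10000, to = 20000, bsize = 1024;

        if (from >= to || bsize == PAGE_SIZE)
            return 0;                        /* nothing straddles the old size */

        unsigned long rounded_from = round_up_to(from, bsize);
        unsigned long index = from >> PAGE_SHIFT;   /* page holding the old EOF */

        printf("old EOF %lu: block-aligned end %lu, lives in page %lu\n",
               from, rounded_from, index);
        return 0;
    }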
|
D | Kconfig.debug |
    17 Unmap pages from the kernel linear mapping after free_pages().
    86 reduce the risk of information leaks from freed data. This does
|
D | mempolicy.c |
    1116 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, in do_migrate_pages() argument
    1158 tmp = *from; in do_migrate_pages()
    1181 if ((nodes_weight(*from) != nodes_weight(*to)) && in do_migrate_pages()
    1185 d = node_remap(s, *from, *to); in do_migrate_pages()
    1262 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, in do_migrate_pages() argument
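do_migrate_pages() above uses node_remap(s, *from, *to) to map the position of source node 's' within the 'from' mask onto the node at the same ordinal position in the 'to' mask. With a plain word-sized bitmask the remapping looks roughly like this (a sketch, not the nodemask API; __builtin_popcountl is a GCC/Clang builtin):

    #include <stdio.h>

    /* Map bit 'b' by its ordinal position in 'from' onto the bit with the same
     * ordinal in 'to', wrapping around if 'to' has fewer bits set. */
    static int bit_remap(int b, unsigned long from, unsigned long to)
    {
        if (!(from & (1UL << b)) || to == 0)
            return b;                                  /* not in the source set */

        /* ordinal of 'b' inside 'from' = number of lower set bits */
        int ord = __builtin_popcountl(from & ((1UL << b) - 1));
        ord %= __builtin_popcountl(to);                /* wrap around 'to' */

        for (int i = 0; i < 64; i++)
            if ((to & (1UL << i)) && ord-- == 0)
                return i;
        return b;                                      /* not reached */
    }

    int main(void)
    {
        /* nodes {0,1,2} -> nodes {4,5,6}: node 1 maps to node 5 */
        printf("%d\n", bit_remap(1, 0x7UL, 0x70UL));
        return 0;
    }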
|
D | vmalloc.c |
    525 struct rb_root *root, struct rb_node *from, in find_va_links() argument
    538 link = &from; in find_va_links()
    717 struct rb_node *from, struct rb_root *root, in insert_vmap_area_augment() argument
    723 if (from) in insert_vmap_area_augment()
    724 link = find_va_links(va, NULL, from, &parent); in insert_vmap_area_augment()
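find_va_links() above locates the parent node and the child-link slot where a new vmap_area will hang, optionally starting the descent at a caller-supplied node instead of the root (that is what the 'link = &from' match shows). The same search on a plain binary search tree keyed by address, without any augmentation or rebalancing:

    #include <stddef.h>

    struct node { unsigned long key; struct node *left, *right; };

    /*
     * Find where a node with 'key' would hang: return the address of the child
     * pointer to fill in and report the future parent via '*parent'.  Start the
     * walk at 'from' if given, otherwise at the root.
     */
    static struct node **find_links(struct node **root, struct node *from,
                                    unsigned long key, struct node **parent)
    {
        struct node **link = from ? &from : root;

        *parent = NULL;
        while (*link) {
            *parent = *link;
            link = key < (*link)->key ? &(*link)->left : &(*link)->right;
        }
        return link;
    }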
|
D | slab.c |
    576 struct array_cache *from, unsigned int max) in transfer_objects() argument
    579 int nr = min3(from->avail, max, to->limit - to->avail); in transfer_objects()
    584 memcpy(to->entry + to->avail, from->entry + from->avail -nr, in transfer_objects()
    587 from->avail -= nr; in transfer_objects()
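transfer_objects() above tops up one per-CPU object array from another: it takes at most 'max' pointers, bounded by what the source holds and the room left in the destination, and copies them from the tail of the source. A sketch with a made-up stack type rather than struct array_cache:

    #include <string.h>

    struct ptr_stack { void **entry; unsigned int avail, limit; };

    static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
    {
        unsigned int m = a < b ? a : b;
        return m < c ? m : c;
    }

    /* Move up to 'max' pointers from the top of 'from' onto 'to'. */
    static unsigned int transfer_objects(struct ptr_stack *to,
                                         struct ptr_stack *from, unsigned int max)
    {
        unsigned int nr = min3u(from->avail, max, to->limit - to->avail);
        if (!nr)
            return 0;

        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
               sizeof(void *) * nr);
        from->avail -= nr;
        to->avail += nr;
        return nr;
    }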
|
D | migrate.c |
    1401 int migrate_pages(struct list_head *from, new_page_t get_new_page, in migrate_pages() argument
    1428 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
    1463 rc = split_huge_page_to_list(page, from); in migrate_pages()
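migrate_pages() above walks the 'from' list with list_for_each_entry_safe() because entries are unlinked (or replaced by split pages) while the walk is in progress. The essential trick is caching the next pointer before touching the current element; a sketch on a minimal singly linked list, not the kernel's list_head API:

    #include <stddef.h>
    #include <stdlib.h>

    struct item { int keep; struct item *next; };

    /* Remove every item with keep == 0; safe because 'next' is saved up front. */
    static void prune(struct item **head)
    {
        struct item **link = head;
        struct item *cur, *next;

        for (cur = *head; cur; cur = next) {
            next = cur->next;              /* cache before 'cur' may be freed */
            if (cur->keep) {
                link = &cur->next;
            } else {
                *link = next;              /* unlink and release the element */
                free(cur);
            }
        }
    }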
|
D | slub.c |
    806 void *from, void *to) in restore_bytes() argument
    808 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
    809 memset(from, data, to - from); in restore_bytes()
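restore_bytes() above repairs a corrupted poison pattern by logging the range and memset()ing it back; the range is half-open, which is why the message prints 'to - 1'. A trivial standalone sketch:

    #include <stdio.h>
    #include <string.h>

    /* Refill [from, to) with 'data' and report the repaired range. */
    static void restore_bytes(unsigned char data, void *from, void *to)
    {
        fprintf(stderr, "restoring %p-%p to 0x%02x\n",
                from, (void *)((char *)to - 1), data);
        memset(from, data, (size_t)((char *)to - (char *)from));
    }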
|
D | shmem.c |
    2528 unsigned from = pos & (PAGE_SIZE - 1); in shmem_write_end() local
    2529 zero_user_segments(page, 0, from, in shmem_write_end()
    2530 from + copied, PAGE_SIZE); in shmem_write_end()
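shmem_write_end() above zeroes the parts of the page that the copy did not cover: everything before the in-page write offset and everything after 'from + copied'. The same bookkeeping on a plain buffer, assuming the copy fits inside one page:

    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Zero the regions of a page-sized buffer not covered by a short copy;
     * assumes from + copied <= PAGE_SIZE. */
    static void zero_uncopied(unsigned char *page, unsigned long pos, unsigned int copied)
    {
        unsigned int from = pos & (PAGE_SIZE - 1);    /* offset of the write in the page */

        memset(page, 0, from);                        /* [0, from)                  */
        memset(page + from + copied, 0,
               PAGE_SIZE - (from + copied));          /* [from + copied, PAGE_SIZE) */
    }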
|