mm/
iov_iter.c
    7  static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)  in copy_to_iter_iovec() argument
   25  left = __copy_to_user(buf, from, copy);  in copy_to_iter_iovec()
   28  from += copy;  in copy_to_iter_iovec()
   34  left = __copy_to_user(buf, from, copy);  in copy_to_iter_iovec()
   37  from += copy;  in copy_to_iter_iovec()
  103  void *kaddr, *from;  in copy_page_to_iter_iovec() local
  119  from = kaddr + offset;  in copy_page_to_iter_iovec()
  122  left = __copy_to_user_inatomic(buf, from, copy);  in copy_page_to_iter_iovec()
  125  from += copy;  in copy_page_to_iter_iovec()
  132  left = __copy_to_user_inatomic(buf, from, copy);  in copy_page_to_iter_iovec()
  [all …]
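All of the iov_iter.c hits above come from one copy loop: copy what fits into the current iovec segment, bump the source pointer by the amount copied, and step to the next segment. A minimal userspace sketch of that pattern follows; copy_to_segments() is a made-up name, memcpy() stands in for __copy_to_user(), and a bare struct iovec array stands in for the iterator.

#include <string.h>
#include <sys/uio.h>

/* Copy 'bytes' bytes from a kernel-side buffer into an array of user
 * segments, one segment at a time.  Sketch only: memcpy() replaces
 * __copy_to_user(), so there is no fault handling here. */
static size_t copy_to_segments(const void *from, size_t bytes,
                               struct iovec *iov, unsigned long nr_segs)
{
        const char *src = from;
        size_t done = 0;

        while (bytes && nr_segs) {
                size_t copy = iov->iov_len < bytes ? iov->iov_len : bytes;

                memcpy(iov->iov_base, src, copy); /* kernel: left = __copy_to_user(buf, from, copy) */
                src += copy;                      /* the "from += copy" step in the hits */
                done += copy;
                bytes -= copy;
                iov++;
                nr_segs--;
        }
        return done;
}

The kernel version additionally keeps the return value of each __copy_to_user() ("left", the bytes it failed to copy) so it can stop early when a user page faults.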
memcontrol.c
  431  struct mem_cgroup *from;  member
 1564  struct mem_cgroup *from;  in mem_cgroup_under_move() local
 1572  from = mc.from;  in mem_cgroup_under_move()
 1574  if (!from)  in mem_cgroup_under_move()
 1577  ret = mem_cgroup_same_or_subtree(memcg, from)  in mem_cgroup_under_move()
 3365  struct mem_cgroup *from,  in mem_cgroup_move_account() argument
 3371  VM_BUG_ON(from == to);  in mem_cgroup_move_account()
 3392  if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)  in mem_cgroup_move_account()
 3395  move_lock_mem_cgroup(from, &flags);  in mem_cgroup_move_account()
 3398  __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],  in mem_cgroup_move_account()
  [all …]
filemap.c
 2361  generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)  in generic_file_direct_write() argument
 2371  write_len = iov_iter_count(from);  in generic_file_direct_write()
 2398  data = *from;  in generic_file_direct_write()
 2416  iov_iter_advance(from, written);  in generic_file_direct_write()
 2557  ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)  in __generic_file_write_iter() argument
 2566  size_t count = iov_iter_count(from);  in __generic_file_write_iter()
 2577  iov_iter_truncate(from, count);  in __generic_file_write_iter()
 2591  written = generic_file_direct_write(iocb, from, pos);  in __generic_file_write_iter()
 2602  status = generic_perform_write(file, from, pos);  in __generic_file_write_iter()
 2634  written = generic_perform_write(file, from, pos);  in __generic_file_write_iter()
  [all …]
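The filemap.c hits are the iov_iter bookkeeping in the generic write path: read how many bytes remain, clamp that count, and after the write advance the iterator by the bytes actually written. A toy stand-in (struct toy_iter is a made-up type carrying only a byte count, not the real struct iov_iter) shows the accounting:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct iov_iter: only the remaining byte count matters here. */
struct toy_iter {
        size_t count;
};

static size_t toy_iter_count(const struct toy_iter *i) { return i->count; }

static void toy_iter_truncate(struct toy_iter *i, size_t n)
{
        if (i->count > n)
                i->count = n;           /* clamp, as iov_iter_truncate() does */
}

static void toy_iter_advance(struct toy_iter *i, size_t n)
{
        i->count -= n;                  /* consume what was written (caller keeps n <= count) */
}

int main(void)
{
        struct toy_iter from = { .count = 9000 };
        size_t written;

        toy_iter_truncate(&from, 4096);         /* e.g. a size-limit clamp */
        written = toy_iter_count(&from);        /* pretend the whole clamped range was written */
        toy_iter_advance(&from, written);

        printf("remaining: %zu\n", toy_iter_count(&from));   /* prints 0 */
        return 0;
}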
nommu.c
  628  static void free_page_series(unsigned long from, unsigned long to)  in free_page_series() argument
  630  for (; from < to; from += PAGE_SIZE) {  in free_page_series()
  631  struct page *page = virt_to_page(from);  in free_page_series()
  633  kdebug("- free %lx", from);  in free_page_series()
 1614  unsigned long from, unsigned long to)  in shrink_vma() argument
 1623  if (from > vma->vm_start)  in shrink_vma()
 1624  vma->vm_end = from;  in shrink_vma()
 1635  if (from > region->vm_start) {  in shrink_vma()
 1637  region->vm_top = region->vm_end = from;  in shrink_vma()
 1644  free_page_series(from, to);  in shrink_vma()
hugetlb.c
  151  long from;  member
  167  if (f > rg->from)  in region_add()
  168  f = rg->from;  in region_add()
  175  if (rg->from > t)  in region_add()
  188  nrg->from = f;  in region_add()
  210  if (&rg->link == head || t < rg->from) {  in region_chg()
  217  nrg->from = f;  in region_chg()
  229  if (f > rg->from)  in region_chg()
  230  f = rg->from;  in region_chg()
  237  if (rg->from > t)  in region_chg()
  [all …]
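The hugetlb.c hits come from the reservation map, a list of half-open [from, to) regions kept sorted and merged as reservations grow. Below is a standalone sketch of that merge under simplifying assumptions: region_merge() is a made-up helper, it uses a plain singly linked list plus malloc()/free() rather than the kernel's list_head, and it collapses the kernel's two-phase region_chg()/region_add() into a single call.

#include <stdlib.h>

/* Half-open [from, to) region, kept in a list sorted by 'from'. */
struct region {
        long from, to;
        struct region *next;
};

/* Merge [f, t) into the sorted list; overlapping regions are absorbed. */
static struct region *region_merge(struct region *head, long f, long t)
{
        struct region **pp = &head, *rg;

        while ((rg = *pp) && rg->to < f)        /* skip regions ending before f */
                pp = &rg->next;

        while ((rg = *pp) && rg->from <= t) {   /* absorb everything overlapping [f, t) */
                if (rg->from < f)
                        f = rg->from;           /* the "f = rg->from" step in the hits */
                if (rg->to > t)
                        t = rg->to;
                *pp = rg->next;
                free(rg);
        }

        rg = malloc(sizeof(*rg));
        if (!rg)
                return head;                    /* sketch only: real code preallocates */
        rg->from = f;                           /* "nrg->from = f" */
        rg->to = t;
        rg->next = *pp;
        *pp = rg;
        return head;
}

The real code is split into region_chg(), which sizes the change and can allocate, and region_add(), which commits it, so the merge itself never has to fail; the sketch instead just gives up on allocation failure.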
truncate.c
  752  void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)  in pagecache_isize_extended() argument
  761  if (from >= to || bsize == PAGE_CACHE_SIZE)  in pagecache_isize_extended()
  764  rounded_from = round_up(from, bsize);  in pagecache_isize_extended()
  768  index = from >> PAGE_CACHE_SHIFT;  in pagecache_isize_extended()
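The truncate.c hits are offset arithmetic: round the old end-of-file up to a filesystem block boundary and find the page index containing it. The same computation as a standalone program; round_up_p2() and LOCAL_PAGE_SHIFT are made-up stand-ins for the kernel's round_up() and PAGE_CACHE_SHIFT, and the sample values are arbitrary.

#include <stdio.h>

#define LOCAL_PAGE_SHIFT 12     /* stand-in for PAGE_CACHE_SHIFT, 4 KiB pages */

/* round_up() for power-of-two alignments, as applied to 'from' above */
static unsigned long long round_up_p2(unsigned long long x, unsigned long long align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        unsigned long long from = 5000;                 /* old i_size */
        unsigned long long bsize = 1024;                /* fs block size */

        unsigned long long rounded_from = round_up_p2(from, bsize);
        unsigned long long index = from >> LOCAL_PAGE_SHIFT;

        /* prints: rounded_from=5120 index=1 */
        printf("rounded_from=%llu index=%llu\n", rounded_from, index);
        return 0;
}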
Kconfig
   40  can have degraded performance from the extra overhead that
  215  # Heavily threaded applications may benefit from splitting the mm-wide
  220  # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
  344  int "Low address space to protect from user allocation"
  349  from userspace allocation. Keeping a user from writing to low pages
  368  bool "Enable recovery from hardware memory errors"
  371  Enables code to recover from some memory failures on systems
  504  be allocated from it. This way, the kernel can use the memory for
  553  in the case where decompressing from RAM is faster that swap device
page_io.c
  272  struct iov_iter from = {  in __swap_writepage() local
  278  from.bvec = &bv; /* older gcc versions are broken */  in __swap_writepage()
  287  &kiocb, &from,  in __swap_writepage()
filemap_xip.c
  450  xip_truncate_page(struct address_space *mapping, loff_t from)  in xip_truncate_page() argument
  452  pgoff_t index = from >> PAGE_CACHE_SHIFT;  in xip_truncate_page()
  453  unsigned offset = from & (PAGE_CACHE_SIZE-1);  in xip_truncate_page()
Kconfig.debug
    9  Unmap pages from the kernel linear mapping after free_pages().
migrate.c
 1112  int migrate_pages(struct list_head *from, new_page_t get_new_page,  in migrate_pages() argument
 1131  list_for_each_entry_safe(page, page2, from, lru) {  in migrate_pages()
 1535  const nodemask_t *from, unsigned long flags)  in migrate_vmas() argument
 1542  err = vma->vm_ops->migrate(vma, to, from, flags);  in migrate_vmas()
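The migrate_pages() hit uses list_for_each_entry_safe(), the iteration form that saves the next entry up front so the current one can be unlinked mid-walk (pages come off the list as they are migrated). The same idea on a plain singly linked list; for_each_safe() and struct node are made up for the sketch.

#include <stdlib.h>

struct node {
        struct node *next;
        int value;
};

/* Walk the list; fn() returning nonzero means "drop this node".  The next
 * pointer is fetched before fn() runs, so unlinking and freeing the current
 * node is safe, which is the point of the list_for_each_entry_safe() form. */
static void for_each_safe(struct node **head, int (*fn)(struct node *))
{
        struct node **pp = head, *n, *next;

        for (n = *head; n; n = next) {
                next = n->next;                 /* remember the successor first */
                if (fn(n)) {
                        *pp = next;             /* unlink the current node */
                        free(n);
                } else {
                        pp = &n->next;
                }
        }
}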
mempolicy.c
 1032  int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,  in do_migrate_pages() argument
 1045  err = migrate_vmas(mm, from, to, flags);  in do_migrate_pages()
 1080  tmp = *from;  in do_migrate_pages()
 1103  if ((nodes_weight(*from) != nodes_weight(*to)) &&  in do_migrate_pages()
 1107  d = node_remap(s, *from, *to);  in do_migrate_pages()
 1172  int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,  in do_migrate_pages() argument
slab.c
  812  struct array_cache *from, unsigned int max)  in transfer_objects() argument
  815  int nr = min3(from->avail, max, to->limit - to->avail);  in transfer_objects()
  820  memcpy(to->entry + to->avail, from->entry + from->avail -nr,  in transfer_objects()
  823  from->avail -= nr;  in transfer_objects()
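transfer_objects() moves up to max object pointers from the tail of one array_cache to another: take the smallest of what the source holds, the caller's limit, and the room left in the destination, memcpy() that many pointers, and fix up both avail counts. A standalone sketch with plain arrays; struct obj_array, min3u() and transfer() are made-up names mirroring the kernel's array_cache, min3() and transfer_objects().

#include <string.h>

struct obj_array {
        unsigned int avail;     /* objects currently stored */
        unsigned int limit;     /* capacity of entry[] */
        void *entry[64];
};

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
        unsigned int m = a < b ? a : b;
        return m < c ? m : c;
}

/* Move up to 'max' objects from the tail of 'from' onto the tail of 'to'. */
static unsigned int transfer(struct obj_array *to, struct obj_array *from,
                             unsigned int max)
{
        unsigned int nr = min3u(from->avail, max, to->limit - to->avail);

        if (!nr)
                return 0;

        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
               sizeof(void *) * nr);

        from->avail -= nr;
        to->avail += nr;
        return nr;
}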
shmem.c
 1509  unsigned from = pos & (PAGE_CACHE_SIZE - 1);  in shmem_write_end() local
 1510  zero_user_segments(page, 0, from,  in shmem_write_end()
 1511  from + copied, PAGE_CACHE_SIZE);  in shmem_write_end()
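The shmem_write_end() hits zero the parts of a page that a short copy left untouched: everything before the write offset and everything after from + copied. The same two-segment zeroing on a plain buffer; zero_outside() and BUF_SIZE are made up, standing in for zero_user_segments() and PAGE_CACHE_SIZE.

#include <string.h>

#define BUF_SIZE 4096                           /* stand-in for PAGE_CACHE_SIZE */

/* Zero [0, start1) and [start2, BUF_SIZE), leaving [start1, start2) alone.
 * Assumes start1 <= start2 <= BUF_SIZE. */
static void zero_outside(unsigned char *buf, unsigned int start1, unsigned int start2)
{
        memset(buf, 0, start1);
        memset(buf + start2, 0, BUF_SIZE - start2);
}

Called as zero_outside(buf, from, from + copied) it clears exactly the two ranges the hit above passes to zero_user_segments().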
slub.c
  700  void *from, void *to)  in restore_bytes() argument
  702  slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);  in restore_bytes()
  703  memset(from, data, to - from);  in restore_bytes()
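restore_bytes() repairs a corrupted range by reporting it and then rewriting [from, to) with the expected byte value; the memset(from, data, to - from) in the hit is the whole fix. A tiny standalone equivalent, with restore_range() as a made-up name and printf() in place of slab_fix():

#include <stdio.h>
#include <string.h>

/* Overwrite [from, to) with 'data', reporting the range first, the same
 * shape as the restore_bytes() hit above. */
static void restore_range(unsigned char *from, unsigned char *to, unsigned char data)
{
        printf("Restoring %p-%p=0x%x\n", (void *)from, (void *)(to - 1), data);
        memset(from, data, (size_t)(to - from));
}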