
Searched refs:to (Results 1 – 13 of 13) sorted by relevance

/mm/
Kconfig
15 This option allows you to change some of the ways that
88 SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
99 # Don't discard allocated memory used to track "memory" and "reserved" memblocks
100 # after early boot, so it can still be used to test for validity of memory.
141 determines what happens to newly added memory regions. Policy setting
145 Say Y here if you want all hot-plugged memory blocks to appear in
147 Say N here if you want the default policy to keep all hot-plugged
167 # Default to 4 for wider testing, though 8 might be more appropriate.
169 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
171 # a per-page lock leads to problems when multiple tables need to be locked
[all …]
Kconfig.debug
6 could be used for debugging features that need to insert extra
7 field for every page. This extension enables us to save memory
8 by not allocating this extra memory according to boottime
19 slowdown, but helps to find certain types of memory corruption.
33 pages are not saved to the suspend image.
36 allowing the kernel mapping to be backed by large pages on some
57 help to find bare alloc_page(s) leaks. Even if you include this
59 "page_owner=on" to boot parameter in order to enable it. Eats
74 help to find page migration failures. Even if you include this
76 "page_pinner=on" to boot parameter in order to enable it. Eats
[all …]
hugetlb.c
252 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) in get_file_region_entry_from_cache() argument
263 nrg->to = to; in get_file_region_entry_from_cache()
340 if (&prg->link != &resv->regions && prg->to == rg->from && in coalesce_file_region()
342 prg->to = rg->to; in coalesce_file_region()
352 if (&nrg->link != &resv->regions && nrg->from == rg->to && in coalesce_file_region()
364 long to, struct hstate *h, struct hugetlb_cgroup *cg, in hugetlb_resv_map_add() argument
370 nrg = get_file_region_entry_from_cache(map, from, to); in hugetlb_resv_map_add()
377 return to - from; in hugetlb_resv_map_add()
410 if (rg->to > last_accounted_offset) in add_reservation_in_range()
411 last_accounted_offset = rg->to; in add_reservation_in_range()
[all …]
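The hugetlb.c hits above are from the reservation-map code: each file_region tracks a half-open [from, to) range, and coalesce_file_region() merges a region with a neighbour whose end touches its start (prg->to == rg->from) or whose start touches its end (nrg->from == rg->to). Below is a minimal userspace sketch of that coalescing idea, assuming a plain sorted array of ranges instead of the kernel's linked resv_map list and ignoring the cgroup-ownership checks the real code makes before merging; the struct and function names are made up for illustration.

#include <stdio.h>

/* Simplified stand-in for the kernel's file_region: a half-open range. */
struct range { long from, to; };

/* Walk a sorted, non-overlapping array and merge each range into the
 * previous one when they touch, roughly the prg->to == rg->from case
 * handled by coalesce_file_region(). Returns the new length. */
static int coalesce(struct range *r, int n)
{
    int i, out = 0;

    for (i = 0; i < n; i++) {
        if (out && r[out - 1].to == r[i].from)
            r[out - 1].to = r[i].to;   /* adjacent: extend previous region */
        else
            r[out++] = r[i];           /* gap: keep as a separate region */
    }
    return out;
}

int main(void)
{
    struct range r[] = { {0, 2}, {2, 5}, {7, 9}, {9, 12} };
    int i, n = coalesce(r, 4);

    for (i = 0; i < n; i++)
        printf("[%ld, %ld)\n", r[i].from, r[i].to);   /* [0,5) and [7,12) */
    return 0;
}

As in hugetlb_resv_map_add(), the useful return value of such an operation is the number of units covered, which is why the kernel helper simply returns to - from.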
memcontrol.c
192 struct mem_cgroup *to; member
1393 struct mem_cgroup *to; in mem_cgroup_under_move() local
1401 to = mc.to; in mem_cgroup_under_move()
1406 mem_cgroup_is_descendant(to, memcg); in mem_cgroup_under_move()
3382 struct mem_cgroup *from, struct mem_cgroup *to) in mem_cgroup_move_swap_account() argument
3387 new_id = mem_cgroup_id(to); in mem_cgroup_move_swap_account()
3391 mod_memcg_state(to, MEMCG_SWAP, 1); in mem_cgroup_move_swap_account()
3398 struct mem_cgroup *from, struct mem_cgroup *to) in mem_cgroup_move_swap_account() argument
5551 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); in mem_cgroup_do_precharge()
5559 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); in mem_cgroup_do_precharge()
[all …]
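The memcontrol.c matches come from the charge-moving path: mc.to is the destination cgroup of an in-flight move, and mem_cgroup_move_swap_account() rewrites a swap entry's owner id while shifting the MEMCG_SWAP counter from one group to the other. The toy sketch below only shows that retag-and-recount shape; the structures and the move_swap_account() helper are invented for illustration, and the real kernel code uses an atomic exchange on the swap_cgroup record rather than a plain assignment.

#include <stdio.h>

/* Hypothetical stand-ins: each group counts the swap pages it owns,
 * and each swap slot records the id of its owning group. */
struct group { int id; long swap_pages; };
struct swap_slot { int owner_id; };

/* Move accounting of one slot between groups: retag the owner id,
 * then adjust both per-group counters. */
static void move_swap_account(struct swap_slot *slot,
                              struct group *from, struct group *to)
{
    if (slot->owner_id != from->id)
        return;                 /* not ours; nothing to move */
    slot->owner_id = to->id;
    from->swap_pages--;
    to->swap_pages++;
}

int main(void)
{
    struct group a = { 1, 3 }, b = { 2, 0 };
    struct swap_slot s = { 1 };

    move_swap_account(&s, &a, &b);
    printf("a=%ld b=%ld owner=%d\n", a.swap_pages, b.swap_pages, s.owner_id);
    return 0;
}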
truncate.c
783 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) in pagecache_isize_extended() argument
790 WARN_ON(to > inode->i_size); in pagecache_isize_extended()
792 if (from >= to || bsize == PAGE_SIZE) in pagecache_isize_extended()
796 if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1))) in pagecache_isize_extended()
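The truncate.c hit is the early-exit arithmetic of pagecache_isize_extended(): when a file grows from 'from' to 'to', the page that straddles the old size only needs attention if a new filesystem block starts inside that page. A small re-derivation of those visible checks, assuming 4 KiB pages and a power-of-two block size; the straddling_page_needs_work() name is hypothetical and the kernel function of course goes on to do the actual page work when the checks do not bail out.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round x up to the next multiple of a power-of-two size. */
static unsigned long round_up_pow2(unsigned long x, unsigned long size)
{
    return (x + size - 1) & ~(size - 1);
}

/* Mirror of the early exits shown above: nothing to do if the range is
 * empty, if blocks are page sized, if the growth stays within the block
 * containing 'from', or if the first new block is page aligned. */
static int straddling_page_needs_work(unsigned long from, unsigned long to,
                                      unsigned long bsize)
{
    unsigned long rounded_from;

    if (from >= to || bsize == PAGE_SIZE)
        return 0;
    rounded_from = round_up_pow2(from, bsize);
    if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
        return 0;
    return 1;
}

int main(void)
{
    /* 1 KiB blocks: growing from 1500 to 6000 exposes new blocks inside
     * the page containing offset 1500, so work is needed. */
    printf("%d\n", straddling_page_needs_work(1500, 6000, 1024)); /* 1 */
    printf("%d\n", straddling_page_needs_work(1500, 1800, 1024)); /* 0 */
    return 0;
}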
nommu.c
499 static void free_page_series(unsigned long from, unsigned long to) in free_page_series() argument
501 for (; from < to; from += PAGE_SIZE) { in free_page_series()
1411 unsigned long from, unsigned long to) in shrink_vma() argument
1421 vma->vm_start = to; in shrink_vma()
1431 to = region->vm_top; in shrink_vma()
1434 region->vm_start = to; in shrink_vma()
1439 free_page_series(from, to); in shrink_vma()
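In nommu.c, free_page_series() walks a byte range in PAGE_SIZE steps and releases each page, and shrink_vma() then trims vm_start or vm_end of the region to the new boundary before freeing the excess. A tiny sketch of just the page-stepping loop, with a made-up pages[] table of malloc'd buffers standing in for the pfn-to-struct-page lookup:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Toy page table: index -> allocation. */
static void *pages[16];

/* Walk [from, to) one page at a time and release each page, the same
 * stepping pattern as free_page_series() in mm/nommu.c. */
static void free_page_series(unsigned long from, unsigned long to)
{
    for (; from < to; from += PAGE_SIZE) {
        unsigned long idx = from / PAGE_SIZE;

        free(pages[idx]);
        pages[idx] = NULL;
        printf("freed page %lu\n", idx);
    }
}

int main(void)
{
    for (int i = 0; i < 16; i++)
        pages[i] = malloc(PAGE_SIZE);

    /* Shrink the "mapping": keep the first two pages, drop the rest. */
    free_page_series(2 * PAGE_SIZE, 16 * PAGE_SIZE);

    free(pages[0]);
    free(pages[1]);
    return 0;
}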
sparse-vmemmap.c
253 void *to; in vmemmap_restore_pte() local
259 to = page_to_virt(page); in vmemmap_restore_pte()
260 copy_page(to, (void *)walk->reuse_addr); in vmemmap_restore_pte()
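vmemmap_restore_pte() copies the contents of the shared "reuse" page into a freshly allocated page before rewiring the page table entry, so that each vmemmap page becomes a private, writable copy again. A userspace sketch of only the copy step, using memcpy in place of the kernel's copy_page(); restore_private_page() is a made-up name:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Allocate a private copy of a shared read-only template page, the copy
 * performed by copy_page(to, walk->reuse_addr) in the snippet above. */
static void *restore_private_page(const void *reuse_addr)
{
    void *to = malloc(PAGE_SIZE);   /* stand-in for the new page */

    if (to)
        memcpy(to, reuse_addr, PAGE_SIZE);
    return to;
}

int main(void)
{
    char reuse[PAGE_SIZE];
    void *copy;

    memset(reuse, 0xab, sizeof(reuse));
    copy = restore_private_page(reuse);
    free(copy);
    return 0;
}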
mempolicy.c
1095 const nodemask_t *to, int flags) in do_migrate_pages() argument
1159 if ((nodes_weight(*from) != nodes_weight(*to)) && in do_migrate_pages()
1160 (node_isset(s, *to))) in do_migrate_pages()
1163 d = node_remap(s, *from, *to); in do_migrate_pages()
1241 const nodemask_t *to, int flags) in do_migrate_pages() argument
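The mempolicy.c hits are from do_migrate_pages(), which pairs each source node with a destination node by position: node_remap(s, *from, *to) takes the ordinal of s within the source nodemask and returns the node with the same ordinal in the destination mask. Here is a standalone sketch of that positional remap on plain 64-bit masks, assuming at most 64 nodes and ignoring the wrap-around the kernel applies when the two masks have different weights; the helper names are invented.

#include <stdio.h>
#include <stdint.h>

/* Ordinal of bit 'node' within 'mask' (set bits that precede it),
 * or -1 if the bit is clear. */
static int node_ordinal(uint64_t mask, int node)
{
    if (!(mask & (1ULL << node)))
        return -1;
    return __builtin_popcountll(mask & ((1ULL << node) - 1));
}

/* The n-th set bit of 'mask', or -1 if it has fewer set bits. */
static int nth_node(uint64_t mask, int n)
{
    for (int node = 0; node < 64; node++) {
        if (mask & (1ULL << node)) {
            if (n-- == 0)
                return node;
        }
    }
    return -1;
}

/* Positional remap, the idea behind node_remap(s, *from, *to). */
static int node_remap(int s, uint64_t from, uint64_t to)
{
    int ord = node_ordinal(from, s);

    return ord < 0 ? s : nth_node(to, ord);
}

int main(void)
{
    uint64_t from = 0x5;   /* nodes {0, 2} */
    uint64_t to   = 0x18;  /* nodes {3, 4} */

    printf("%d -> %d\n", 0, node_remap(0, from, to)); /* 0 -> 3 */
    printf("%d -> %d\n", 2, node_remap(2, from, to)); /* 2 -> 4 */
    return 0;
}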
slab.c
575 static int transfer_objects(struct array_cache *to, in transfer_objects() argument
579 int nr = min3(from->avail, max, to->limit - to->avail); in transfer_objects()
584 memcpy(to->entry + to->avail, from->entry + from->avail - nr, in transfer_objects()
588 to->avail += nr; in transfer_objects()
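transfer_objects() in slab.c moves as many free object pointers as fit from one array_cache to another: the count is clamped by what the source holds, what the caller asked for, and the free room in the destination, then one memcpy moves the tail of the source array. A compact userspace sketch of that clamp-and-bulk-copy, with a simplified cache structure in place of the kernel's array_cache:

#include <stdio.h>
#include <string.h>

/* Simplified array_cache: a bounded stack of object pointers. */
struct cache {
    unsigned int avail, limit;
    void *entry[8];
};

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
    unsigned int m = a < b ? a : b;

    return m < c ? m : c;
}

/* Same shape as transfer_objects(): clamp, bulk-copy the source tail,
 * then adjust both counts. Returns how many objects moved. */
static int transfer_objects(struct cache *to, struct cache *from,
                            unsigned int max)
{
    unsigned int nr = min3u(from->avail, max, to->limit - to->avail);

    if (!nr)
        return 0;
    memcpy(to->entry + to->avail, from->entry + from->avail - nr,
           sizeof(void *) * nr);
    from->avail -= nr;
    to->avail += nr;
    return nr;
}

int main(void)
{
    int objs[5];
    struct cache src = { .avail = 5, .limit = 8 };
    struct cache dst = { .avail = 6, .limit = 8 };

    for (int i = 0; i < 5; i++)
        src.entry[i] = &objs[i];

    /* Destination only has room for 2, even though 4 were requested. */
    printf("moved %d\n", transfer_objects(&dst, &src, 4)); /* moved 2 */
    return 0;
}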
shmem.c
2529 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) in shmem_file_read_iter() argument
2546 if (!iter_is_iovec(to)) in shmem_file_read_iter()
2624 ret = copy_page_to_iter(page, offset, nr, to); in shmem_file_read_iter()
2631 if (!iov_iter_count(to)) in shmem_file_read_iter()
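shmem_file_read_iter() loops over the file a page at a time, copying each chunk into the caller's iov_iter with copy_page_to_iter() and stopping once iov_iter_count() reaches zero. A rough sketch of that loop shape, using a flat destination buffer plus a remaining-byte count in place of the iov_iter helpers; all names here are simplified stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Minimal stand-in for an iov_iter: one flat buffer plus a byte count. */
struct iter {
    char *buf;
    size_t count;
};

static size_t copy_to_iter(const char *src, size_t nr, struct iter *to)
{
    if (nr > to->count)
        nr = to->count;
    memcpy(to->buf, src, nr);
    to->buf += nr;
    to->count -= nr;
    return nr;
}

/* Page-at-a-time read loop: stop when the iterator is drained or the
 * "file" ends, the overall shape of shmem_file_read_iter(). */
static size_t read_pages(const char *file, size_t file_size, struct iter *to)
{
    size_t pos = 0;

    while (pos < file_size && to->count) {
        size_t nr = file_size - pos;

        if (nr > PAGE_SIZE)
            nr = PAGE_SIZE;
        pos += copy_to_iter(file + pos, nr, to);
    }
    return pos;
}

int main(void)
{
    static char file[3 * PAGE_SIZE];
    static char out[2 * PAGE_SIZE];
    struct iter it = { out, sizeof(out) };

    memset(file, 'x', sizeof(file));
    printf("read %zu bytes\n", read_pages(file, sizeof(file), &it));
    return 0;
}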
slub.c
924 void *from, void *to) in restore_bytes() argument
926 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); in restore_bytes()
927 memset(from, data, to - from); in restore_bytes()
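restore_bytes() in slub.c is the poisoning repair helper: after reporting corruption it overwrites the byte range [from, to) with the expected fill value so later consistency checks start from a clean state. A direct userspace sketch of that report-then-memset pattern, with plain printf standing in for slab_fix():

#include <stdio.h>
#include <string.h>

/* Overwrite [from, to) with 'data' and report what was repaired,
 * the same shape as restore_bytes() in mm/slub.c. */
static void restore_bytes(const char *message, unsigned char data,
                          void *from, void *to)
{
    printf("Restoring %s %p-%p=0x%x\n", message, from,
           (void *)((char *)to - 1), data);
    memset(from, data, (char *)to - (char *)from);
}

int main(void)
{
    unsigned char buf[16];

    memset(buf, 0xa5, sizeof(buf));
    buf[4] = 0;                                     /* simulated corruption */
    restore_bytes("object padding", 0xa5, buf + 4, buf + 8);
    return 0;
}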
/mm/kasan/
quarantine.c
65 static void qlist_move_all(struct qlist_head *from, struct qlist_head *to) in qlist_move_all() argument
70 if (qlist_empty(to)) { in qlist_move_all()
71 *to = *from; in qlist_move_all()
76 to->tail->next = from->head; in qlist_move_all()
77 to->tail = from->tail; in qlist_move_all()
78 to->bytes += from->bytes; in qlist_move_all()
288 struct qlist_head *to, in qlist_move_cache() argument
303 qlist_put(to, curr, obj_cache->size); in qlist_move_cache()
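qlist_move_all() in the KASAN quarantine splices one singly linked list onto another in O(1): if the destination is empty it simply adopts the source's head, tail, and byte count; otherwise it links the source head after the destination tail and adds the byte counts, then resets the source. A self-contained sketch of that splice, with simplified qnode/qlist types mirroring the head/tail/bytes fields shown above:

#include <stdio.h>
#include <stddef.h>

struct qnode {
    struct qnode *next;
    size_t size;
};

/* Simplified qlist_head: singly linked list with cached tail and byte count. */
struct qlist {
    struct qnode *head, *tail;
    size_t bytes;
};

static int qlist_empty(const struct qlist *q)
{
    return !q->head;
}

static void qlist_init(struct qlist *q)
{
    q->head = q->tail = NULL;
    q->bytes = 0;
}

static void qlist_put(struct qlist *q, struct qnode *n, size_t size)
{
    n->next = NULL;
    n->size = size;
    if (qlist_empty(q))
        q->head = n;
    else
        q->tail->next = n;
    q->tail = n;
    q->bytes += size;
}

/* O(1) splice of 'from' onto the end of 'to', then reset 'from',
 * following the structure of qlist_move_all(). */
static void qlist_move_all(struct qlist *from, struct qlist *to)
{
    if (qlist_empty(from))
        return;
    if (qlist_empty(to)) {
        *to = *from;            /* destination empty: adopt the whole list */
    } else {
        to->tail->next = from->head;
        to->tail = from->tail;
        to->bytes += from->bytes;
    }
    qlist_init(from);
}

int main(void)
{
    struct qnode a, b, c;
    struct qlist x, y;

    qlist_init(&x);
    qlist_init(&y);
    qlist_put(&x, &a, 32);
    qlist_put(&y, &b, 64);
    qlist_put(&y, &c, 16);

    qlist_move_all(&y, &x);
    printf("x.bytes=%zu y empty=%d\n", x.bytes, qlist_empty(&y));
    return 0;
}

The companion qlist_move_cache() walk shown above reuses the same qlist_put() primitive to filter nodes belonging to one cache into a separate list.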
/mm/damon/
Kconfig
8 This builds a framework that allows kernel subsystems to monitor
23 to the KUnit documentation.
51 to the KUnit documentation.
72 to the KUnit documentation.
84 This is suggested to be used as a proactive and lightweight