Occurrences of the identifier "pos" under mm/, listed as file, line number, matched source line, and enclosing function:

/mm/
page_idle.c
  120  loff_t pos, size_t count) in page_idle_bitmap_read() argument
  127  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_read()
  130  pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_read()
  165  loff_t pos, size_t count) in page_idle_bitmap_write() argument
  172  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_write()
  175  pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_write()
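Here pos and count are byte offsets into the idle bitmap exposed as a binary sysfs file: each byte covers BITS_PER_BYTE page frames, and accesses that are not aligned to the 8-byte chunk size are rejected. A minimal userspace sketch of that offset arithmetic (the function name is illustrative, not kernel API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BITMAP_CHUNK_SIZE sizeof(uint64_t)	/* bitmap is accessed in u64 chunks */
#define BITS_PER_BYTE 8				/* one bit per page frame */

/* Illustrative analogue of the checks in page_idle_bitmap_read():
 * reject unaligned offsets/lengths, then map the byte offset to the
 * first page frame number the access covers. */
static int bitmap_pos_to_pfn(long long pos, size_t count, unsigned long *pfn)
{
	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -1;			/* the kernel returns -EINVAL here */
	*pfn = (unsigned long)pos * BITS_PER_BYTE;
	return 0;
}

int main(void)
{
	unsigned long pfn;

	if (bitmap_pos_to_pfn(16, 8, &pfn) == 0)
		printf("byte offset 16 -> first PFN %lu\n", pfn);	/* 128 */
	return 0;
}
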
filemap.c
  3325  loff_t pos, unsigned len, unsigned flags, in pagecache_write_begin() argument
  3330  return aops->write_begin(file, mapping, pos, len, flags, in pagecache_write_begin()
  3336  loff_t pos, unsigned len, unsigned copied, in pagecache_write_end() argument
  3341  return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
  3372  loff_t pos = iocb->ki_pos; in generic_file_direct_write() local
  3378  end = (pos + write_len - 1) >> PAGE_SHIFT; in generic_file_direct_write()
  3382  if (filemap_range_has_page(inode->i_mapping, pos, in generic_file_direct_write()
  3383  pos + write_len - 1)) in generic_file_direct_write()
  3386  written = filemap_write_and_wait_range(mapping, pos, in generic_file_direct_write()
  3387  pos + write_len - 1); in generic_file_direct_write()
  [all …]
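pagecache_write_begin() and pagecache_write_end() are thin wrappers that dispatch to the filesystem's address_space_operations; a buffered write loops over the affected pages, bracketing each per-page copy with the two calls. A trimmed, kernel-style sketch of that loop, with the actual user copy elided and names invented for illustration (not buildable standalone; see generic_perform_write() for the real version):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>

/* Sketch only: the write_begin/write_end bracket around each page.
 * A real implementation copies data with an iov_iter between the two
 * calls; here that step is a placeholder comment. */
static ssize_t toy_buffered_write(struct file *file, size_t count, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;

	while (count) {
		unsigned offset = pos & (PAGE_SIZE - 1);	/* offset inside page */
		unsigned len = min_t(size_t, count, PAGE_SIZE - offset);
		struct page *page;
		void *fsdata;
		int ret;

		/* the filesystem prepares (and locks) the target page */
		ret = pagecache_write_begin(file, mapping, pos, len, 0,
					    &page, &fsdata);
		if (ret < 0)
			return written ? written : ret;

		/* ... copy len bytes of user data into the page here ... */

		/* the filesystem unlocks the page and marks it dirty */
		ret = pagecache_write_end(file, mapping, pos, len, len,
					  page, fsdata);
		if (ret < 0)
			return written ? written : ret;

		pos += ret;
		count -= ret;
		written += ret;
	}
	return written;
}
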
vmstat.c
  1365  static void *frag_start(struct seq_file *m, loff_t *pos) in frag_start() argument
  1368  loff_t node = *pos; in frag_start()
  1378  static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) in frag_next() argument
  1382  (*pos)++; in frag_next()
  1728  static void *vmstat_start(struct seq_file *m, loff_t *pos) in vmstat_start() argument
  1733  if (*pos >= NR_VMSTAT_ITEMS) in vmstat_start()
  1764  return (unsigned long *)m->private + *pos; in vmstat_start()
  1767  static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) in vmstat_next() argument
  1769  (*pos)++; in vmstat_next()
  1770  if (*pos >= NR_VMSTAT_ITEMS) in vmstat_next()
  [all …]
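frag_start/frag_next and vmstat_start/vmstat_next implement the seq_file iterator protocol: *pos is a cursor the seq_file core hands back on every call, start() positions the iterator at *pos, next() advances *pos, and a NULL return ends the sequence. A minimal iterator over a fixed array in the same style (all "demo_" names are illustrative):

#include <linux/seq_file.h>
#include <linux/kernel.h>

static const char *const demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* *pos persists across reads; anything past the end means stop */
	return *pos < ARRAY_SIZE(demo_items) ? (void *)&demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;				/* advance the cursor */
	return *pos < ARRAY_SIZE(demo_items) ? (void *)&demo_items[*pos] : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to unlock in this toy example */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char *const *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};
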
zswap.c
  978  unsigned int pos; in zswap_is_page_same_filled() local
  982  for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) { in zswap_is_page_same_filled()
  983  if (page[pos] != page[0]) in zswap_is_page_same_filled()
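zswap_is_page_same_filled() checks whether a page is one repeated machine word (commonly all zeroes); if so, zswap stores just that word instead of a compressed copy. A runnable userspace analogue of the scan, assuming a 4 KiB page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Userspace analogue of zswap_is_page_same_filled(): view the page as
 * an array of unsigned longs and compare every word to the first. */
static int page_same_filled(const void *ptr, unsigned long *value)
{
	const unsigned long *page = ptr;
	unsigned int pos;

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos] != page[0])
			return 0;
	}
	*value = page[0];	/* the fill pattern, stored instead of the page */
	return 1;
}

int main(void)
{
	static unsigned long buf[PAGE_SIZE / sizeof(unsigned long)];
	unsigned long val;

	memset(buf, 0, sizeof(buf));
	printf("same-filled: %d\n", page_same_filled(buf, &val));	/* 1 */
	buf[7] = 42;
	printf("same-filled: %d\n", page_same_filled(buf, &val));	/* 0 */
	return 0;
}
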
slab_common.c
  955  void *slab_start(struct seq_file *m, loff_t *pos) in slab_start() argument
  958  return seq_list_start(&slab_caches, *pos); in slab_start()
  961  void *slab_next(struct seq_file *m, void *p, loff_t *pos) in slab_next() argument
  963  return seq_list_next(p, &slab_caches, pos); in slab_next()
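When the sequence is backed by a kernel linked list, the open-coded cursor collapses into the seq_list_* helpers, exactly as slab_start()/slab_next() do with slab_caches (the real functions take slab_mutex in start and drop it in stop). A sketch of that shape with illustrative names:

#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_mutex);

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&demo_mutex);
	return seq_list_start(&demo_list, *pos);	/* walk to the *pos-th node */
}

static void *demo_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &demo_list, pos);	/* bump *pos, return next node */
}

static void demo_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&demo_mutex);
}
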
slub.c
  1727  unsigned long *pos, void *start, in next_freelist_entry() argument
  1738  idx = s->random_seq[*pos]; in next_freelist_entry()
  1739  *pos += 1; in next_freelist_entry()
  1740  if (*pos >= freelist_count) in next_freelist_entry()
  1741  *pos = 0; in next_freelist_entry()
  1753  unsigned long idx, pos, page_limit, freelist_count; in shuffle_freelist() local
  1759  pos = get_random_int() % freelist_count; in shuffle_freelist()
  1765  cur = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
  1771  next = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
  4727  long start, end, pos; in add_location() local
  [all …]
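next_freelist_entry() hardens the allocator by handing out slab objects in a precomputed random order: s->random_seq is a shuffled permutation, and *pos is a cursor that starts at a random slot and wraps around, so every object is visited exactly once. (slab.c's next_random_slot(), further down in this listing, is the SLAB-side twin of the same idea.) A runnable userspace sketch of the cursor logic:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define FREELIST_COUNT 8

/* Sketch of next_freelist_entry()'s cursor: read the permutation at
 * *pos, advance, and wrap to 0 at the end. */
static unsigned int next_idx(const unsigned int *random_seq,
			     unsigned long *pos, unsigned long count)
{
	unsigned int idx = random_seq[*pos];

	if (++*pos >= count)
		*pos = 0;		/* wrap, as next_freelist_entry() does */
	return idx;
}

int main(void)
{
	/* stand-in for s->random_seq, normally a shuffled 0..N-1 permutation */
	unsigned int seq[FREELIST_COUNT] = { 3, 0, 6, 1, 7, 4, 2, 5 };
	unsigned long pos, i;

	srand((unsigned)time(NULL));
	pos = (unsigned long)rand() % FREELIST_COUNT;	/* random start, as in shuffle_freelist() */

	for (i = 0; i < FREELIST_COUNT; i++)
		printf("%u ", next_idx(seq, &pos, FREELIST_COUNT));
	putchar('\n');
	return 0;
}
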
shmem.c
  531  LIST_HEAD(list), *pos, *next; in shmem_unused_huge_shrink()
  543  list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
  544  info = list_entry(pos, struct shmem_inode_info, shrinklist); in shmem_unused_huge_shrink()
  570  list_for_each_safe(pos, next, &to_remove) { in shmem_unused_huge_shrink()
  571  info = list_entry(pos, struct shmem_inode_info, shrinklist); in shmem_unused_huge_shrink()
  577  list_for_each_safe(pos, next, &list) { in shmem_unused_huge_shrink()
  580  info = list_entry(pos, struct shmem_inode_info, shrinklist); in shmem_unused_huge_shrink()
  2486  loff_t pos, unsigned len, unsigned flags, in shmem_write_begin() argument
  2491  pgoff_t index = pos >> PAGE_SHIFT; in shmem_write_begin()
  2498  if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
  [all …]
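shmem_unused_huge_shrink() relies on list_for_each_safe(), which keeps a lookahead pointer (next) alongside the cursor (pos) so the current entry can be moved to a private list mid-walk; the moved entries are then processed after the lock is dropped. A kernel-style sketch of that splice-then-process pattern (struct and function names are invented for illustration):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	struct list_head link;
	bool expired;
};

static void demo_reap(struct list_head *active, spinlock_t *lock)
{
	struct list_head *pos, *next;
	LIST_HEAD(doomed);

	spin_lock(lock);
	list_for_each_safe(pos, next, active) {
		struct demo_node *node = list_entry(pos, struct demo_node, link);

		if (node->expired)
			list_move(&node->link, &doomed);	/* safe: next is saved */
	}
	spin_unlock(lock);

	/* heavier work happens outside the lock, as in shmem */
	list_for_each_safe(pos, next, &doomed) {
		struct demo_node *node = list_entry(pos, struct demo_node, link);

		list_del(&node->link);
		kfree(node);
	}
}
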
zsmalloc.c
  1965  int offset, pos; in zs_page_migrate() local
  2000  pos = offset; in zs_page_migrate()
  2002  while (pos < PAGE_SIZE) { in zs_page_migrate()
  2003  head = obj_to_head(page, s_addr + pos); in zs_page_migrate()
  2009  pos += class->size; in zs_page_migrate()
  2019  for (addr = s_addr + offset; addr < s_addr + pos; in zs_page_migrate()
  2067  for (addr = s_addr + offset; addr < s_addr + pos; in zs_page_migrate()
kmemleak.c
  1623  static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) in kmemleak_seq_start() argument
  1626  loff_t n = *pos; in kmemleak_seq_start()
  1649  static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) in kmemleak_seq_next() argument
  1655  ++(*pos); in kmemleak_seq_next()
slab.h
  642  void *slab_start(struct seq_file *m, loff_t *pos);
  643  void *slab_next(struct seq_file *m, void *p, loff_t *pos);
memcontrol.c
  1148  struct mem_cgroup *pos = NULL; in mem_cgroup_iter() local
  1157  pos = prev; in mem_cgroup_iter()
  1177  pos = READ_ONCE(iter->position); in mem_cgroup_iter()
  1178  if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
  1188  (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
  1192  if (pos) in mem_cgroup_iter()
  1193  css = &pos->css; in mem_cgroup_iter()
  1231  (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
  1233  if (pos) in mem_cgroup_iter()
  1234  css_put(&pos->css); in mem_cgroup_iter()
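mem_cgroup_iter() caches its position in a shared iter->position, and both updates above go through cmpxchg(): the store succeeds only if the cursor still holds the value this walker last read, so a concurrent walker's newer position is never clobbered (the result is deliberately discarded with the (void) cast). A runnable userspace analogue of that compare-and-swap discipline using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for iter->position: replaced only if it still holds the
 * value this thread last observed, so a newer concurrent update wins. */
static _Atomic(long) shared_cursor;

static void update_cursor(long seen, long next)
{
	long expected = seen;

	/* succeeds only if nobody moved the cursor since we read it */
	atomic_compare_exchange_strong(&shared_cursor, &expected, next);
}

int main(void)
{
	atomic_store(&shared_cursor, 10);
	update_cursor(10, 20);	/* wins: cursor was still 10 */
	update_cursor(10, 99);	/* loses: cursor is now 20 */
	printf("cursor = %ld\n", atomic_load(&shared_cursor));	/* 20 */
	return 0;
}
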
z3fold.c
  1333  struct list_head *pos; in z3fold_reclaim_page() local
  1350  list_for_each_prev(pos, &pool->lru) { in z3fold_reclaim_page()
  1351  page = list_entry(pos, struct page, lru); in z3fold_reclaim_page()
huge_memory.c
  2857  LIST_HEAD(list), *pos, *next; in deferred_split_scan()
  2868  list_for_each_safe(pos, next, &ds_queue->split_queue) { in deferred_split_scan()
  2869  page = list_entry((void *)pos, struct page, mapping); in deferred_split_scan()
  2883  list_for_each_safe(pos, next, &list) { in deferred_split_scan()
  2884  page = list_entry((void *)pos, struct page, mapping); in deferred_split_scan()
vmalloc.c
  3451  static void *s_start(struct seq_file *m, loff_t *pos) in s_start() argument
  3458  return seq_list_start(&vmap_area_list, *pos); in s_start()
  3461  static void *s_next(struct seq_file *m, void *p, loff_t *pos) in s_next() argument
  3463  return seq_list_next(p, &vmap_area_list, pos); in s_next()
slab.c
  2375  unsigned int pos; member
  2403  state->pos = rand % count; in freelist_state_initialize()
  2412  if (state->pos >= state->count) in next_random_slot()
  2413  state->pos = 0; in next_random_slot()
  2414  return state->list[state->pos++]; in next_random_slot()
page_alloc.c
  7890  void *pos; in free_reserved_area() local
  7895  for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { in free_reserved_area()
  7896  struct page *page = virt_to_page(pos); in free_reserved_area()
  7956  #define adj_init_size(start, end, size, pos, adj) \ in mem_init_print_info() argument
  7958  if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ in mem_init_print_info()
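free_reserved_area() walks the virtual range [start, end) one page at a time with a void * cursor (arithmetic on void * is a GCC extension the kernel relies on), converting each position to its struct page with virt_to_page(). A runnable userspace sketch of the same stride walk, using char * for portability:

#include <stdio.h>

#define PAGE_SIZE 4096

/* Illustrative: count the pages in [start, end), stepping the cursor
 * one page per iteration as free_reserved_area() does. The kernel
 * would look up and free the page backing each position instead. */
static unsigned long count_pages(char *start, char *end)
{
	unsigned long pages = 0;
	char *pos;

	for (pos = start; pos < end; pos += PAGE_SIZE, pages++)
		;
	return pages;
}

int main(void)
{
	static char region[4 * PAGE_SIZE];

	printf("%lu pages\n", count_pages(region, region + sizeof(region)));
	return 0;
}
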
swapfile.c
  2796  static void *swap_start(struct seq_file *swap, loff_t *pos) in swap_start() argument
  2800  loff_t l = *pos; in swap_start()
  2817  static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) in swap_next() argument
  2827  ++(*pos); in swap_next()
percpu.c
  2109  struct pcpu_chunk *pos; in free_percpu() local
  2111  list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) in free_percpu()
  2112  if (pos != chunk) { in free_percpu()
/mm/damon/
dbgfs.c
  189  int pos = 0, parsed, ret; in str_to_schemes() local
  200  while (pos < len && *nr_schemes < max_nr_schemes) { in str_to_schemes()
  204  ret = sscanf(&str[pos], in str_to_schemes()
  225  pos += parsed; in str_to_schemes()
  335  int pos = 0, parsed, ret; in str_to_target_ids() local
  341  while (*nr_ids < max_nr_ids && pos < len) { in str_to_target_ids()
  342  ret = sscanf(&str[pos], "%lu%n", &id, &parsed); in str_to_target_ids()
  343  pos += parsed; in str_to_target_ids()
  525  int pos = 0, parsed, ret; in set_init_regions() local
  535  while (pos < len) { in set_init_regions()
  [all …]
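All three DAMON debugfs parsers use the same idiom: sscanf()'s %n conversion reports how many characters the call consumed, and pos advances by that amount so the next sscanf() resumes where the previous one stopped. A runnable userspace reduction of the loop in str_to_target_ids():

#include <stdio.h>

int main(void)
{
	const char str[] = "12 34 56";
	unsigned long id;
	int pos = 0, parsed;

	/* %lu assigns the number; %n stores how many chars were consumed */
	while (sscanf(&str[pos], "%lu%n", &id, &parsed) == 1) {
		printf("id %lu (consumed %d chars)\n", id, parsed);
		pos += parsed;		/* resume after the parsed token */
	}
	return 0;
}
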
/mm/kfence/
core.c
  529  static void *start_object(struct seq_file *seq, loff_t *pos) in start_object() argument
  531  if (*pos < CONFIG_KFENCE_NUM_OBJECTS) in start_object()
  532  return (void *)((long)*pos + 1); in start_object()
  540  static void *next_object(struct seq_file *seq, void *v, loff_t *pos) in next_object() argument
  542  ++*pos; in next_object()
  543  if (*pos < CONFIG_KFENCE_NUM_OBJECTS) in next_object()
  544  return (void *)((long)*pos + 1); in next_object()
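One wrinkle in start_object()/next_object(): seq_file treats a NULL return from start() or next() as end-of-sequence, so an iterator over plain indices cannot hand back index 0 as a pointer. KFENCE therefore encodes the cursor as *pos + 1, keeping every live value non-NULL, and show() decodes it by subtracting 1. The same trick in isolation (NR_ITEMS is a hypothetical stand-in for CONFIG_KFENCE_NUM_OBJECTS):

#include <linux/seq_file.h>

#define NR_ITEMS 32		/* stand-in for CONFIG_KFENCE_NUM_OBJECTS */

static void *idx_start(struct seq_file *seq, loff_t *pos)
{
	/* index 0 becomes cookie 1, so valid cursors are never NULL */
	return *pos < NR_ITEMS ? (void *)((long)*pos + 1) : NULL;
}

static void *idx_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return *pos < NR_ITEMS ? (void *)((long)*pos + 1) : NULL;
}

static int idx_show(struct seq_file *seq, void *v)
{
	long i = (long)v - 1;	/* undo the off-by-one encoding */

	seq_printf(seq, "object %ld\n", i);
	return 0;
}
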