References to the identifier "pos" under /mm/ (each hit lists the source
line, the enclosing function, and whether "pos" is a local variable or an
argument there):

D | filemap.c
    1785  loff_t pos = *ppos;  in generic_file_read_iter() [local]
    1796  retval = filemap_write_and_wait_range(mapping, pos,  in generic_file_read_iter()
    1797          pos + count - 1);  in generic_file_read_iter()
    1800  retval = mapping->a_ops->direct_IO(iocb, &data, pos);  in generic_file_read_iter()
    1804  *ppos = pos + retval;  in generic_file_read_iter()
    2385  loff_t pos;  in generic_write_checks() [local]
    2394  pos = iocb->ki_pos;  in generic_write_checks()
    2401  iov_iter_truncate(from, limit - (unsigned long)pos);  in generic_write_checks()
    2407  if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&  in generic_write_checks()
    2409  if (pos >= MAX_NON_LFS)  in generic_write_checks()
    [all …]
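
The filemap.c hits show the two classic jobs of a byte position in the
read/write path: fencing the page cache before a direct I/O read, and
bounds-checking a write against the non-LFS file-size limit. Below is a
minimal sketch of the first pattern, assuming the ->direct_IO(iocb, iter,
offset) signature of this era; the helper name is hypothetical:

    #include <linux/fs.h>
    #include <linux/uio.h>

    static ssize_t dio_read_sketch(struct kiocb *iocb, struct iov_iter *iter,
                                   loff_t *ppos)
    {
            struct address_space *mapping = iocb->ki_filp->f_mapping;
            size_t count = iov_iter_count(iter);
            loff_t pos = *ppos;
            ssize_t retval;

            /* Dirty cached pages covering [pos, pos + count - 1] must
             * reach the backing store before the device-level read. */
            retval = filemap_write_and_wait_range(mapping, pos,
                                                  pos + count - 1);
            if (retval)
                    return retval;

            retval = mapping->a_ops->direct_IO(iocb, iter, pos);
            if (retval > 0)
                    *ppos = pos + retval;   /* advance by bytes read */
            return retval;
    }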

D | page_idle.c
     117  loff_t pos, size_t count)  in page_idle_bitmap_read() [argument]
     124  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)  in page_idle_bitmap_read()
     127  pfn = pos * BITS_PER_BYTE;  in page_idle_bitmap_read()
     162  loff_t pos, size_t count)  in page_idle_bitmap_write() [argument]
     169  if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)  in page_idle_bitmap_write()
     172  pfn = pos * BITS_PER_BYTE;  in page_idle_bitmap_write()
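
Both bitmap helpers translate a file offset into a page frame number: one
bit per page means one byte covers BITS_PER_BYTE pages, and accesses must
land on BITMAP_CHUNK_SIZE (one u64) boundaries. A sketch of just that
validation and mapping, with the chunk constant mirroring page_idle.c and
a made-up helper name:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define BITMAP_CHUNK_SIZE       sizeof(u64)

    static int idle_pos_to_pfn(loff_t pos, size_t count, unsigned long *pfn)
    {
            /* Reject reads/writes that are not whole bitmap chunks. */
            if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
                    return -EINVAL;

            *pfn = pos * BITS_PER_BYTE;     /* first frame at this offset */
            return 0;
    }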

D | bootmem.c
     361  unsigned long pos;  in mark_bootmem() [local]
     364  pos = start;  in mark_bootmem()
     369  if (pos < bdata->node_min_pfn ||  in mark_bootmem()
     370      pos >= bdata->node_low_pfn) {  in mark_bootmem()
     371  BUG_ON(pos != start);  in mark_bootmem()
     377  err = mark_bootmem_node(bdata, pos, max, reserve, flags);  in mark_bootmem()
     379  mark_bootmem(start, pos, 0, 0);  in mark_bootmem()
     385  pos = bdata->node_low_pfn;  in mark_bootmem()
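
mark_bootmem() is a range walk with rollback: "pos" tracks how far the PFN
range has been processed, node by node, and on a reservation failure the
already-reserved prefix [start, pos) is freed again before the error is
returned. A hedged sketch of that control flow only, with hypothetical
stand-ins for the per-node work:

    /* Hypothetical stand-ins for mark_bootmem_node() and the
     * bdata->node_low_pfn lookup done by the real code. */
    static unsigned long node_end_pfn_of(unsigned long pfn);
    static int mark_node_range(unsigned long start, unsigned long end,
                               int reserve);

    static int mark_range_sketch(unsigned long start, unsigned long end,
                                 int reserve)
    {
            unsigned long pos = start;

            while (pos < end) {
                    unsigned long node_end = node_end_pfn_of(pos);
                    unsigned long max = node_end < end ? node_end : end;
                    int err = mark_node_range(pos, max, reserve);

                    if (err && reserve) {
                            /* Undo: free what was already reserved. */
                            mark_range_sketch(start, pos, 0);
                            return err;
                    }
                    pos = node_end;     /* continue in the next node */
            }
            return 0;
    }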

D | vmstat.c
     877  static void *frag_start(struct seq_file *m, loff_t *pos)  in frag_start() [argument]
     880  loff_t node = *pos;  in frag_start()
     890  static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)  in frag_next() [argument]
     894  (*pos)++;  in frag_next()
    1309  static void *vmstat_start(struct seq_file *m, loff_t *pos)  in vmstat_start() [argument]
    1314  if (*pos >= ARRAY_SIZE(vmstat_text))  in vmstat_start()
    1340  return (unsigned long *)m->private + *pos;  in vmstat_start()
    1343  static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)  in vmstat_next() [argument]
    1345  (*pos)++;  in vmstat_next()
    1346  if (*pos >= ARRAY_SIZE(vmstat_text))  in vmstat_next()
    [all …]
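
All of these are seq_file iterator callbacks, where *pos is the cursor the
seq_file core persists between read() calls: ->start() maps the cursor to
an element (NULL means end of sequence), and ->next() must both advance
*pos and return the following element. A minimal sketch of that contract
over an illustrative string array:

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    static const char *const demo_text[] = { "alpha", "beta", "gamma" };

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
            if (*pos >= ARRAY_SIZE(demo_text))
                    return NULL;            /* cursor past the end */
            return (void *)&demo_text[*pos];
    }

    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
            (*pos)++;                       /* advance the cursor */
            return demo_start(m, pos);      /* reuse the bounds check */
    }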

D | memcontrol.c
     865  struct mem_cgroup *pos = NULL;  in mem_cgroup_iter() [local]
     874  pos = prev;  in mem_cgroup_iter()
     894  pos = READ_ONCE(iter->position);  in mem_cgroup_iter()
     895  if (!pos || css_tryget(&pos->css))  in mem_cgroup_iter()
     905  (void)cmpxchg(&iter->position, pos, NULL);  in mem_cgroup_iter()
     909  if (pos)  in mem_cgroup_iter()
     910  css = &pos->css;  in mem_cgroup_iter()
     957  (void)cmpxchg(&iter->position, pos, memcg);  in mem_cgroup_iter()
     959  if (pos)  in mem_cgroup_iter()
     960  css_put(&pos->css);  in mem_cgroup_iter()
    [all …]
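
The cmpxchg() lines are the interesting part: mem_cgroup_iter() caches its
last position in iter->position without holding a lock, so a reader must
pin the cached group with css_tryget() before trusting it, and a stale
(dying) entry is cleared with cmpxchg() so that exactly one of several
racing iterators resets the cache. A trimmed sketch of that load-side
idiom, assuming the memcg types of this tree:

    /* Sketch of the lockless cached-cursor load in mem_cgroup_iter(). */
    static struct mem_cgroup *load_cached_position(struct mem_cgroup **cache)
    {
            struct mem_cgroup *pos;

            for (;;) {
                    pos = READ_ONCE(*cache);
                    /* NULL means "no cached position"; otherwise try to
                     * take a reference, since the group may be dying. */
                    if (!pos || css_tryget(&pos->css))
                            return pos;
                    /* Dying entry: clear it only if nobody else has
                     * replaced it meanwhile; lost races just retry. */
                    (void)cmpxchg(cache, pos, NULL);
            }
    }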

D | slab.h
     369  void *slab_start(struct seq_file *m, loff_t *pos);
     370  void *slab_next(struct seq_file *m, void *p, loff_t *pos);

D | slab_common.c
    1061  void *slab_start(struct seq_file *m, loff_t *pos)  in slab_start() [argument]
    1064  return seq_list_start(&slab_caches, *pos);  in slab_start()
    1067  void *slab_next(struct seq_file *m, void *p, loff_t *pos)  in slab_next() [argument]
    1069  return seq_list_next(p, &slab_caches, pos);  in slab_next()
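
When the iterated objects live on a plain list_head, the ->start() and
->next() boilerplate collapses into the seq_list_start()/seq_list_next()
helpers, exactly as slab_start()/slab_next() do over slab_caches (which
the real code keeps locked via slab_mutex for the whole walk). A sketch
over a hypothetical demo_list:

    #include <linux/list.h>
    #include <linux/seq_file.h>

    static LIST_HEAD(demo_list);    /* stand-in for slab_caches */

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
            /* Walks the list to the *pos-th node, NULL past the end. */
            return seq_list_start(&demo_list, *pos);
    }

    static void *demo_next(struct seq_file *m, void *p, loff_t *pos)
    {
            /* Returns the next node and increments *pos itself. */
            return seq_list_next(p, &demo_list, pos);
    }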

D | page_alloc.c
    4217  int pos, j, node;  in build_zonelists_in_zone_order() [local]
    4223  pos = 0;  in build_zonelists_in_zone_order()
    4230  &zonelist->_zonerefs[pos++]);  in build_zonelists_in_zone_order()
    4235  zonelist->_zonerefs[pos].zone = NULL;  in build_zonelists_in_zone_order()
    4236  zonelist->_zonerefs[pos].zone_idx = 0;  in build_zonelists_in_zone_order()
    5920  void *pos;  in free_reserved_area() [local]
    5925  for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {  in free_reserved_area()
    5927  memset(pos, poison, PAGE_SIZE);  in free_reserved_area()
    5928  free_reserved_page(virt_to_page(pos));  in free_reserved_area()
    5970  #define adj_init_size(start, end, size, pos, adj) \  in mem_init_print_info() [argument]
    [all …]
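
The free_reserved_area() hits show "pos" as a virtual-address cursor: it
steps through [start, end) one PAGE_SIZE at a time, optionally filling
each page with a poison byte (so late users of supposedly-freed memory
are caught) before handing it back to the page allocator. A simplified
sketch of that loop:

    #include <linux/mm.h>
    #include <linux/string.h>

    static unsigned long free_range_sketch(void *start, void *end, int poison)
    {
            unsigned long pages = 0;
            void *pos;

            for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
                    if (poison >= 0)        /* negative: skip poisoning */
                            memset(pos, poison, PAGE_SIZE);
                    free_reserved_page(virt_to_page(pos));
            }
            return pages;   /* number of pages released */
    }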

D | kmemleak.c
    1541  static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)  in kmemleak_seq_start() [argument]
    1544  loff_t n = *pos;  in kmemleak_seq_start()
    1567  static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)  in kmemleak_seq_next() [argument]
    1573  ++(*pos);  in kmemleak_seq_next()
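
kmemleak's callbacks show the manual form of the same idiom: ->start()
copies *pos into a counter and walks the object list until the counter
hits zero, so the n-th element corresponds to cursor value n (this is
the walk that seq_list_start() packages up). A generic sketch:

    #include <linux/list.h>

    static struct list_head *walk_to_nth(struct list_head *head, loff_t n)
    {
            struct list_head *p;

            list_for_each(p, head)
                    if (n-- == 0)
                            return p;   /* the n-th node */
            return NULL;                /* cursor past the end */
    }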

D | shmem.c
    1494  loff_t pos, unsigned len, unsigned flags,  in shmem_write_begin() [argument]
    1499  pgoff_t index = pos >> PAGE_CACHE_SHIFT;  in shmem_write_begin()
    1505  if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)  in shmem_write_begin()
    1514  loff_t pos, unsigned len, unsigned copied,  in shmem_write_end() [argument]
    1519  if (pos + copied > inode->i_size)  in shmem_write_end()
    1520  i_size_write(inode, pos + copied);  in shmem_write_end()
    1524  unsigned from = pos & (PAGE_CACHE_SIZE - 1);  in shmem_write_end()
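
The shmem hits are the write_begin/write_end address arithmetic: a byte
position splits into a page-cache index (the high bits) and an offset
within that page (the low bits), and after the copy the inode size is
extended if the write ran past EOF. A sketch, keeping the PAGE_CACHE_*
spelling this tree still uses (later kernels spell it PAGE_SHIFT and
PAGE_SIZE):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static void write_end_sketch(struct inode *inode, loff_t pos,
                                 unsigned copied)
    {
            pgoff_t index = pos >> PAGE_CACHE_SHIFT;        /* which page */
            unsigned from = pos & (PAGE_CACHE_SIZE - 1);    /* offset in it */

            /* Grow the file if the copy went past the old end. */
            if (pos + copied > inode->i_size)
                    i_size_write(inode, pos + copied);

            (void)index;    /* the real code uses these to locate and    */
            (void)from;     /* partially zero the page that was written  */
    }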

D | slub.c
    4364  long start, end, pos;  in add_location() [local]
    4373  pos = start + (end - start + 1) / 2;  in add_location()
    4379  if (pos == end)  in add_location()
    4382  caddr = t->loc[pos].addr;  in add_location()
    4385  l = &t->loc[pos];  in add_location()
    4407  end = pos;  in add_location()
    4409  start = pos;  in add_location()
    4418  l = t->loc + pos;  in add_location()
    4419  if (pos < t->count)  in add_location()
    4421  (t->count - pos) * sizeof(struct location));  in add_location()
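
add_location() keeps its call-site table sorted by address, so "pos" here
is a bisection midpoint: the loop narrows [start, end] until it finds the
insertion slot, and the tail of the array is then shifted up (the
sizeof(struct location) copy at line 4421) to make room. A standalone
sketch of the search with a pared-down location type; the real function
also merges hits on an identical address, which is omitted here:

    struct location { unsigned long addr; };

    /* Returns the first index whose addr exceeds 'addr'; the sentinels
     * start = -1 and end = count maintain the invariant
     * loc[start].addr <= addr < loc[end].addr without bounds checks. */
    static long find_slot(const struct location *loc, long count,
                          unsigned long addr)
    {
            long start = -1, end = count;

            while (start + 1 < end) {
                    long pos = start + (end - start + 1) / 2;

                    if (addr < loc[pos].addr)
                            end = pos;
                    else
                            start = pos;
            }
            return end;     /* insertion slot */
    }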

D | vmalloc.c
    2572  static void *s_start(struct seq_file *m, loff_t *pos)  in s_start() [argument]
    2575  loff_t n = *pos;  in s_start()
    2591  static void *s_next(struct seq_file *m, void *p, loff_t *pos)  in s_next() [argument]
    2595  ++*pos;  in s_next()

D | percpu.c
    1276  struct pcpu_chunk *pos;  in free_percpu() [local]
    1278  list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)  in free_percpu()
    1279  if (pos != chunk) {  in free_percpu()
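
In free_percpu() the name "pos" is just the conventional cursor for
list_for_each_entry(): the macro advances the caller's pointer through
each node, here checking whether the slot of fully free chunks holds any
chunk besides the one just freed. A generic sketch of the idiom with an
illustrative type:

    #include <linux/list.h>

    struct demo_chunk {
            struct list_head list;
    };

    static struct demo_chunk *first_other(struct list_head *head,
                                          struct demo_chunk *chunk)
    {
            struct demo_chunk *pos;

            list_for_each_entry(pos, head, list)    /* walk every node */
                    if (pos != chunk)
                            return pos;     /* first entry besides 'chunk' */
            return NULL;
    }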

D | swapfile.c
    2015  static void *swap_start(struct seq_file *swap, loff_t *pos)  in swap_start() [argument]
    2019  loff_t l = *pos;  in swap_start()
    2038  static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)  in swap_next() [argument]
    2053  ++*pos;  in swap_next()
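
swap_start()/swap_next() are the seq_file pattern once more, with one
wrinkle worth showing: /proc/swaps prints a header line, and the usual
way to do that (which swap_start() uses when the cursor is 0) is to
return the special SEQ_START_TOKEN so ->show() can tell the header apart
from a real record. A sketch, with a stubbed-out hypothetical lookup:

    #include <linux/seq_file.h>

    static void *demo_lookup(loff_t n)
    {
            return NULL;    /* hypothetical: find the n-th record */
    }

    static void *hdr_start(struct seq_file *m, loff_t *pos)
    {
            if (!*pos)
                    return SEQ_START_TOKEN;         /* header row first */
            return demo_lookup(*pos - 1);
    }

    static int hdr_show(struct seq_file *m, void *v)
    {
            if (v == SEQ_START_TOKEN) {
                    seq_puts(m, "Filename\tSize\n");
                    return 0;
            }
            /* ... format a real record here ... */
            return 0;
    }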