/mm/
D | filemap.c
     324  loff_t pos, loff_t count)  in sync_page_range() argument
     326  pgoff_t start = pos >> PAGE_CACHE_SHIFT;  in sync_page_range()
     327  pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;  in sync_page_range()
     332  ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);  in sync_page_range()
     356  loff_t pos, loff_t count)  in sync_page_range_nolock() argument
     358  pgoff_t start = pos >> PAGE_CACHE_SHIFT;  in sync_page_range_nolock()
     359  pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;  in sync_page_range_nolock()
     364  ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);  in sync_page_range_nolock()
    1302  unsigned long nr_segs, loff_t pos)  in generic_file_aio_read() argument
    1326  if (pos < size) {  in generic_file_aio_read()
    [all …]

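The recurring expression in sync_page_range() and sync_page_range_nolock() turns a byte range into page-cache indices: start is pos shifted down by PAGE_CACHE_SHIFT and end covers the last byte of the range, while the untranslated byte range is what gets handed to filemap_fdatawrite_range(). A minimal user-space sketch of that arithmetic, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12) and made-up example values:

/*
 * Sketch of the byte-offset -> page-index arithmetic used in
 * sync_page_range(): the byte range [pos, pos + count) maps to the
 * inclusive page range [start, end].  PAGE_CACHE_SHIFT is assumed to
 * be 12 (4 KiB pages) purely for illustration.
 */
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12

int main(void)
{
	long long pos = 5000, count = 10000;	/* made-up byte range */
	unsigned long start = pos >> PAGE_CACHE_SHIFT;
	unsigned long end = (pos + count - 1) >> PAGE_CACHE_SHIFT;

	/* Prints: bytes [5000, 15000) -> pages [1, 3] */
	printf("bytes [%lld, %lld) -> pages [%lu, %lu]\n",
	       pos, pos + count, start, end);
	return 0;
}
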
D | filemap_xip.c
      61  loff_t isize, pos;  in do_xip_mapping_read() local
      66  pos = *ppos;  in do_xip_mapping_read()
      67  index = pos >> PAGE_CACHE_SHIFT;  in do_xip_mapping_read()
      68  offset = pos & ~PAGE_CACHE_MASK;  in do_xip_mapping_read()
     138  *ppos = pos + copied;  in do_xip_mapping_read()
     316  size_t count, loff_t pos, loff_t *ppos)  in __xip_file_write() argument
     334  offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */  in __xip_file_write()
     335  index = pos >> PAGE_CACHE_SHIFT;  in __xip_file_write()
     365  pos += status;  in __xip_file_write()
     375  *ppos = pos;  in __xip_file_write()
    [all …]

D | vmstat.c
     398  static void *frag_start(struct seq_file *m, loff_t *pos)  in frag_start() argument
     401  loff_t node = *pos;  in frag_start()
     410  static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)  in frag_next() argument
     414  (*pos)++;  in frag_next()
     818  static void *vmstat_start(struct seq_file *m, loff_t *pos)  in vmstat_start() argument
     826  if (*pos >= ARRAY_SIZE(vmstat_text))  in vmstat_start()
     847  return v + *pos;  in vmstat_start()
     850  static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)  in vmstat_next() argument
     852  (*pos)++;  in vmstat_next()
     853  if (*pos >= ARRAY_SIZE(vmstat_text))  in vmstat_next()
    [all …]

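frag_start()/frag_next() and vmstat_start()/vmstat_next() are seq_file iterators, where *pos is the cursor the seq_file core stores between reads: start() maps it to the current item (or returns NULL past the end) and next() advances it. A minimal sketch of that pattern over a static table; the demo_* names are invented, and registering the operations with procfs is left out because that interface differs across kernel versions:

/*
 * Sketch of the seq_file iterator pattern seen in vmstat_start()/
 * vmstat_next(): *pos is the cursor the seq_file core hands back
 * between reads.
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *const demo_text[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	/* NULL means "no more items"; otherwise return the current one. */
	if (*pos >= ARRAY_SIZE(demo_text))
		return NULL;
	return (void *)&demo_text[*pos];
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;			/* advance the cursor */
	if (*pos >= ARRAY_SIZE(demo_text))
		return NULL;
	return (void *)&demo_text[*pos];
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to release */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char *const *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

slab.c and swapfile.c further down use the same pattern; slab.c hands the cursor bookkeeping to the seq_list_start()/seq_list_next() helpers.
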
D | bootmem.c
     294  unsigned long pos;  in mark_bootmem() local
     297  pos = start;  in mark_bootmem()
     302  if (pos < bdata->node_min_pfn ||  in mark_bootmem()
     303  pos >= bdata->node_low_pfn) {  in mark_bootmem()
     304  BUG_ON(pos != start);  in mark_bootmem()
     310  err = mark_bootmem_node(bdata, pos, max, reserve, flags);  in mark_bootmem()
     312  mark_bootmem(start, pos, 0, 0);  in mark_bootmem()
     318  pos = bdata->node_low_pfn;  in mark_bootmem()

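mark_bootmem() walks a PFN range that may span several nodes: it skips node descriptors that do not contain the current position, clamps each piece to the node's window, and advances pos to node_low_pfn before moving on. A user-space sketch of that walk, where struct node_range and mark_node_range() are invented stand-ins for bootmem_data_t and mark_bootmem_node():

/*
 * Sketch of the range walk in mark_bootmem(): a global PFN range
 * [start, end) is split at per-node boundaries and each piece is
 * handed to a per-node helper.
 */
#include <stdio.h>

struct node_range {
	unsigned long min_pfn;	/* first PFN owned by this node */
	unsigned long low_pfn;	/* one past the last PFN it owns */
};

static void mark_node_range(int node, unsigned long start, unsigned long end)
{
	printf("node %d: mark PFNs [%lu, %lu)\n", node, start, end);
}

static void mark_range(const struct node_range *nodes, int nr_nodes,
		       unsigned long start, unsigned long end)
{
	unsigned long pos = start;

	for (int i = 0; i < nr_nodes && pos < end; i++) {
		/* Skip nodes that do not contain the current position. */
		if (pos < nodes[i].min_pfn || pos >= nodes[i].low_pfn)
			continue;

		/* Clamp this piece to the node, then step past the node. */
		unsigned long max = end < nodes[i].low_pfn ?
				    end : nodes[i].low_pfn;

		mark_node_range(i, pos, max);
		pos = nodes[i].low_pfn;
	}
}

int main(void)
{
	const struct node_range nodes[] = { { 0, 100 }, { 100, 250 } };

	mark_range(nodes, 2, 60, 180);	/* range spans both nodes */
	return 0;
}
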
D | page_cgroup.c
     353  unsigned long pos = offset & SC_POS_MASK;  in swap_cgroup_record() local
     366  sc += pos;  in swap_cgroup_record()
     384  unsigned long pos = offset & SC_POS_MASK;  in lookup_swap_cgroup() local
     396  sc += pos;  in lookup_swap_cgroup()

D | slub.c
    3495  long start, end, pos;  in add_location() local
    3504  pos = start + (end - start + 1) / 2;  in add_location()
    3510  if (pos == end)  in add_location()
    3513  caddr = t->loc[pos].addr;  in add_location()
    3516  l = &t->loc[pos];  in add_location()
    3538  end = pos;  in add_location()
    3540  start = pos;  in add_location()
    3549  l = t->loc + pos;  in add_location()
    3550  if (pos < t->count)  in add_location()
    3552  (t->count - pos) * sizeof(struct location));  in add_location()
    [all …]

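add_location() keeps its loc[] table sorted by address and uses pos as the probe of a bisection: (start, end) bracket the candidate slots and the loop stops when pos reaches end, the slot where the new entry is spliced in after the tail of the table is shifted (line 3552) to make room. A user-space sketch of the same bisection, here returning the lowest index at which a key could be inserted into a sorted array; the names are illustrative, not taken from slub.c:

/*
 * Sketch of the bisection add_location() uses: (start, end) bracket
 * the candidate slots and pos probes the midpoint until the interval
 * between them is empty.
 */
#include <stdio.h>

static long insertion_index(const long *sorted, long count, long key)
{
	long start = -1;		/* just before index 0 */
	long end = count;		/* one past the last element */

	for (;;) {
		long pos = start + (end - start + 1) / 2;

		if (pos == end)		/* interval empty: insert here */
			return end;
		if (key <= sorted[pos])
			end = pos;	/* key goes at or before pos */
		else
			start = pos;	/* key goes after pos */
	}
}

int main(void)
{
	long table[] = { 3, 8, 15, 42 };

	/* Prints: insert 10 at index 2 */
	printf("insert 10 at index %ld\n",
	       insertion_index(table, 4, 10));
	return 0;
}
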
D | vmalloc.c
    1721  static void *s_start(struct seq_file *m, loff_t *pos)  in s_start() argument
    1723  loff_t n = *pos;  in s_start()
    1739  static void *s_next(struct seq_file *m, void *p, loff_t *pos)  in s_next() argument
    1743  ++*pos;  in s_next()

D | swapfile.c
    1527  static void *swap_start(struct seq_file *swap, loff_t *pos)  in swap_start() argument
    1531  loff_t l = *pos;  in swap_start()
    1548  static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)  in swap_next() argument
    1563  ++*pos;  in swap_next()

D | shmem.c
    1594  loff_t pos, unsigned len, unsigned flags,  in shmem_write_begin() argument
    1598  pgoff_t index = pos >> PAGE_CACHE_SHIFT;  in shmem_write_begin()
    1605  loff_t pos, unsigned len, unsigned copied,  in shmem_write_end() argument
    1610  if (pos + copied > inode->i_size)  in shmem_write_end()
    1611  i_size_write(inode, pos + copied);  in shmem_write_end()
    1723  const struct iovec *iov, unsigned long nr_segs, loff_t pos)  in shmem_file_aio_read() argument

D | slab.c
    4068  static void *s_start(struct seq_file *m, loff_t *pos)  in s_start() argument
    4070  loff_t n = *pos;  in s_start()
    4076  return seq_list_start(&cache_chain, *pos);  in s_start()
    4079  static void *s_next(struct seq_file *m, void *p, loff_t *pos)  in s_next() argument
    4081  return seq_list_next(p, &cache_chain, pos);  in s_next()
    4271  static void *leaks_start(struct seq_file *m, loff_t *pos)  in leaks_start() argument
    4274  return seq_list_start(&cache_chain, *pos);  in leaks_start()

D | page_alloc.c
    2223  int pos, j, node;  in build_zonelists_in_zone_order() local
    2229  pos = 0;  in build_zonelists_in_zone_order()
    2236  &zonelist->_zonerefs[pos++]);  in build_zonelists_in_zone_order()
    2241  zonelist->_zonerefs[pos].zone = NULL;  in build_zonelists_in_zone_order()
    2242  zonelist->_zonerefs[pos].zone_idx = 0;  in build_zonelists_in_zone_order()