/mm/ |
D | shmem.c |
    140  static int shmem_swapin_page(struct inode *inode, pgoff_t index,
    144  static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
    149  int shmem_getpage(struct inode *inode, pgoff_t index,  in shmem_getpage() argument
    152  return shmem_getpage_gfp(inode, index, pagep, sgp,  in shmem_getpage()
    153  mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);  in shmem_getpage()
    213  static inline bool shmem_inode_acct_block(struct inode *inode, long pages)  in shmem_inode_acct_block() argument
    215  struct shmem_inode_info *info = SHMEM_I(inode);  in shmem_inode_acct_block()
    216  struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);  in shmem_inode_acct_block()
    235  static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)  in shmem_inode_unacct_blocks() argument
    237  struct shmem_inode_info *info = SHMEM_I(inode);  in shmem_inode_unacct_blocks()
    [all …]
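shmem_inode_acct_block() and shmem_inode_unacct_blocks() charge and uncharge tmpfs blocks against a per-superblock limit. A minimal sketch of that charge/uncharge pattern using a percpu_counter, as shmem does; my_sb_info, my_acct_blocks() and my_unacct_blocks() are hypothetical names, and shmem's extra VM_NORESERVE and memcg handling is omitted:

#include <linux/percpu_counter.h>

/* Illustrative stand-in for shmem_sb_info: max_blocks == 0 means "no limit". */
struct my_sb_info {
	unsigned long max_blocks;
	struct percpu_counter used_blocks;
};

/* Charge @pages blocks; fail (return false) rather than exceed the limit. */
static bool my_acct_blocks(struct my_sb_info *sbinfo, long pages)
{
	if (sbinfo->max_blocks &&
	    percpu_counter_compare(&sbinfo->used_blocks,
				   sbinfo->max_blocks - pages) > 0)
		return false;
	percpu_counter_add(&sbinfo->used_blocks, pages);
	return true;
}

/* Undo a successful charge, e.g. when a page is freed or truncated. */
static void my_unacct_blocks(struct my_sb_info *sbinfo, long pages)
{
	percpu_counter_sub(&sbinfo->used_blocks, pages);
}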
|
D | secretmem.c |
    53   struct inode *inode = file_inode(vmf->vma->vm_file);  in secretmem_fault() local
    61   if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))  in secretmem_fault()
    115  static int secretmem_release(struct inode *inode, struct file *file)  in secretmem_release() argument
    175  struct inode *inode = d_inode(dentry);  in secretmem_setattr() local
    176  struct address_space *mapping = inode->i_mapping;  in secretmem_setattr()
    182  if ((ia_valid & ATTR_SIZE) && inode->i_size)  in secretmem_setattr()
    201  struct inode *inode;  in secretmem_file_create() local
    203  inode = alloc_anon_inode(secretmem_mnt->mnt_sb);  in secretmem_file_create()
    204  if (IS_ERR(inode))  in secretmem_file_create()
    205  return ERR_CAST(inode);  in secretmem_file_create()
    [all …]
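secretmem is reached from userspace through the memfd_secret() syscall: the fault handler above SIGBUSes accesses past i_size, and secretmem_setattr() appears to reject ATTR_SIZE once i_size is nonzero, so the fd is sized exactly once before use. A minimal userspace sketch; the raw syscall number is the x86-64 value and is an assumption for headers that lack __NR_memfd_secret:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef __NR_memfd_secret
#define __NR_memfd_secret 447	/* x86-64 value; assumption for older headers */
#endif

int main(void)
{
	int fd = syscall(__NR_memfd_secret, 0);
	if (fd < 0) {
		perror("memfd_secret");	/* needs CONFIG_SECRETMEM (5.14+); may be disabled */
		return 1;
	}
	if (ftruncate(fd, 4096) < 0) {	/* the size may only be set once */
		perror("ftruncate");
		return 1;
	}
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "not in the direct map");	/* faulting in hits secretmem_fault() */
	munmap(p, 4096);
	close(fd);
	return 0;
}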
|
D | truncate.c |
    720  void truncate_pagecache(struct inode *inode, loff_t newsize)  in truncate_pagecache() argument
    722  struct address_space *mapping = inode->i_mapping;  in truncate_pagecache()
    753  void truncate_setsize(struct inode *inode, loff_t newsize)  in truncate_setsize() argument
    755  loff_t oldsize = inode->i_size;  in truncate_setsize()
    757  i_size_write(inode, newsize);  in truncate_setsize()
    759  pagecache_isize_extended(inode, oldsize, newsize);  in truncate_setsize()
    760  truncate_pagecache(inode, newsize);  in truncate_setsize()
    783  void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)  in pagecache_isize_extended() argument
    785  int bsize = i_blocksize(inode);  in pagecache_isize_extended()
    790  WARN_ON(to > inode->i_size);  in pagecache_isize_extended()
    [all …]
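truncate_setsize() encodes the required ordering for a size change: publish the new i_size, let pagecache_isize_extended() zero the post-EOF tail of a straddling block (relevant when blocksize < PAGE_SIZE), then drop the now-stale pagecache. A sketch of how a simple filesystem's ->setattr might use it; myfs_setattr() is hypothetical and on-disk block freeing is elided:

#include <linux/fs.h>
#include <linux/mm.h>

static int myfs_setattr(struct user_namespace *mnt_userns,
			struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	err = setattr_prepare(mnt_userns, dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		/* i_size_write() plus pagecache trim, in the safe order */
		truncate_setsize(inode, attr->ia_size);
		/* ...free the filesystem's on-disk blocks past EOF here... */
	}

	setattr_copy(mnt_userns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}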
|
D | cleancache.c |
    144  static int cleancache_get_key(struct inode *inode,  in cleancache_get_key() argument
    147  int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);  in cleancache_get_key()
    149  struct super_block *sb = inode->i_sb;  in cleancache_get_key()
    151  key->u.ino = inode->i_ino;  in cleancache_get_key()
    155  len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);  in cleancache_get_key()
|
D | fadvise.c |
    34   struct inode *inode;  in generic_fadvise() local
    42   inode = file_inode(file);  in generic_fadvise()
    43   if (S_ISFIFO(inode->i_mode))  in generic_fadvise()
    52   if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {  in generic_fadvise()
    131  endbyte != inode->i_size - 1) {  in generic_fadvise()
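generic_fadvise() backs the fadvise64() syscall; the snippet shows it rejecting FIFOs and short-circuiting for DAX or no-writeback devices. Userspace drives it through posix_fadvise(), for example to drop a file's clean pagecache after a one-pass scan:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "data.bin", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ... read the file sequentially ... */

	/* len == 0 means "to end of file"; DONTNEED drops clean cache */
	int err = posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
	if (err)	/* returns the error rather than setting errno */
		fprintf(stderr, "posix_fadvise: %d\n", err);
	close(fd);
	return 0;
}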
|
D | page-writeback.c |
    1880  struct inode *inode = mapping->host;  in balance_dirty_pages_ratelimited() local
    1881  struct backing_dev_info *bdi = inode_to_bdi(inode);  in balance_dirty_pages_ratelimited()
    1891  if (inode_cgwb_enabled(inode))  in balance_dirty_pages_ratelimited()
    2446  struct inode *inode = mapping->host;  in account_page_dirtied() local
    2453  inode_attach_wb(inode, page);  in account_page_dirtied()
    2454  wb = inode_to_wb(inode);  in account_page_dirtied()
    2557  struct inode *inode = mapping->host;  in account_page_redirty() local
    2561  wb = unlocked_inode_to_wb_begin(inode, &cookie);  in account_page_redirty()
    2565  unlocked_inode_to_wb_end(inode, &cookie);  in account_page_redirty()
    2660  struct inode *inode = mapping->host;  in __cancel_dirty_page() local
    [all …]
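balance_dirty_pages_ratelimited() is the throttle a writer calls after dirtying pagecache, while account_page_dirtied() is where the dirtying is charged to the inode's writeback context. A minimal sketch of the caller side; my_dirty_page() is a hypothetical helper:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Dirty one locked pagecache page, then let writeback throttle us. */
static void my_dirty_page(struct address_space *mapping, struct page *page)
{
	set_page_dirty(page);	/* charged via account_page_dirtied() */
	unlock_page(page);
	put_page(page);

	/* May sleep: pauses the task if the bdi/cgroup is over its dirty limits. */
	balance_dirty_pages_ratelimited(mapping);
}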
|
D | userfaultfd.c |
    69   struct inode *inode;  in mfill_atomic_install_pte() local
    88   inode = dst_vma->vm_file->f_inode;  in mfill_atomic_install_pte()
    90   max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);  in mfill_atomic_install_pte()
    193  struct inode *inode;  in mfill_zeropage_pte() local
    200  inode = dst_vma->vm_file->f_inode;  in mfill_zeropage_pte()
    202  max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);  in mfill_zeropage_pte()
    226  struct inode *inode = file_inode(dst_vma->vm_file);  in mcontinue_atomic_pte() local
    231  ret = shmem_getpage(inode, pgoff, &page, SGP_NOALLOC);  in mcontinue_atomic_pte()
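All three UFFDIO paths above apply the same EOF test before installing a PTE into a shmem-backed VMA: convert i_size to a page count with DIV_ROUND_UP() and refuse offsets at or beyond it. A sketch of just that check; my_offset_ok() is a hypothetical helper:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* True if @pgoff still lies inside the file backing @vma. */
static bool my_offset_ok(struct vm_area_struct *vma, pgoff_t pgoff)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	return pgoff < max_off;	/* past-EOF offsets must fail with -EFAULT */
}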
|
D | swapfile.c |
    2392  struct inode *inode = mapping->host;  in setup_swap_extents() local
    2395  if (S_ISBLK(inode->i_mode)) {  in setup_swap_extents()
    2529  struct inode *inode;  in SYSCALL_DEFINE1() local
    2674  inode = mapping->host;  in SYSCALL_DEFINE1()
    2675  if (S_ISBLK(inode->i_mode)) {  in SYSCALL_DEFINE1()
    2676  struct block_device *bdev = I_BDEV(inode);  in SYSCALL_DEFINE1()
    2682  inode_lock(inode);  in SYSCALL_DEFINE1()
    2683  inode->i_flags &= ~S_SWAPFILE;  in SYSCALL_DEFINE1()
    2684  inode_unlock(inode);  in SYSCALL_DEFINE1()
    2803  static int swaps_open(struct inode *inode, struct file *file)  in swaps_open() argument
    [all …]
|
D | page_io.c |
    83   struct inode *inode = mapping->host;  in generic_swapfile_activate() local
    94   blkbits = inode->i_blkbits;  in generic_swapfile_activate()
    103  last_block = i_size_read(inode) >> blkbits;  in generic_swapfile_activate()
    112  ret = bmap(inode, &first_block);  in generic_swapfile_activate()
    129  ret = bmap(inode, &block);  in generic_swapfile_activate()
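generic_swapfile_activate() walks the swap file block by block, translating each file-relative block to an on-disk block with bmap() and building swap extents from the contiguous runs. A sketch of the translation step; my_block_for_offset() is hypothetical, and note that bmap() returns -EINVAL when the filesystem lacks ->bmap, while a hole usually comes back as block 0:

#include <linux/fs.h>

/* Map a byte offset in @inode to an on-disk block number. */
static int my_block_for_offset(struct inode *inode, loff_t pos,
			       sector_t *result)
{
	sector_t block = pos >> inode->i_blkbits;	/* file-relative block */
	int ret = bmap(inode, &block);	/* translated in place */

	if (ret)
		return ret;	/* e.g. -EINVAL: no ->bmap on this filesystem */
	*result = block;	/* 0 usually means a hole: unusable for swap */
	return 0;
}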
|
D | filemap.c |
    2613  struct inode *inode = mapping->host;  in filemap_read() local
    2620  if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))  in filemap_read()
    2625  iov_iter_truncate(iter, inode->i_sb->s_maxbytes);  in filemap_read()
    2651  isize = i_size_read(inode);  in filemap_read()
    2759  struct inode *inode = mapping->host;  in generic_file_read_iter() local
    2762  size = i_size_read(inode);  in generic_file_read_iter()
    2795  IS_DAX(inode))  in generic_file_read_iter()
    3068  struct inode *inode = mapping->host;  in filemap_fault() local
    3092  max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);  in filemap_fault()
    3121  max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);  in filemap_fault()
    [all …]
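filemap_read() bounds a read twice: the start position is checked against the superblock's s_maxbytes, and the bytes copied are clamped to i_size sampled once per iteration (filemap_fault() applies the same DIV_ROUND_UP() EOF test on the fault path). A sketch of the clamping; my_clamp_read() is a hypothetical helper:

#include <linux/fs.h>
#include <linux/kernel.h>

/* How many bytes of a @want-byte read at @pos are actually readable? */
static size_t my_clamp_read(struct inode *inode, loff_t pos, size_t want)
{
	loff_t limit = min_t(loff_t, i_size_read(inode),
			     inode->i_sb->s_maxbytes);

	if (pos >= limit)
		return 0;	/* at or past EOF (or the fs maximum) */
	return min_t(loff_t, want, limit - pos);
}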
|
D | memfd.c |
    157  struct inode *inode = file_inode(file);  in memfd_add_seals() local
    197  inode_lock(inode);  in memfd_add_seals()
    226  inode_unlock(inode);  in memfd_add_seals()
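memfd_add_seals() checks and applies seals under the inode lock. Userspace drives it through fcntl(); for instance, creating a sealable memfd and then forbidding any resizing or further sealing:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	if (ftruncate(fd, 4096) < 0) {
		perror("ftruncate");
		return 1;
	}
	/* After this, ftruncate() fails with EPERM and the seal set is final. */
	if (fcntl(fd, F_ADD_SEALS,
		  F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL) < 0) {
		perror("F_ADD_SEALS");
		return 1;
	}
	close(fd);
	return 0;
}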
|
D | hugetlb.c |
    238   static inline struct hugepage_subpool *subpool_inode(struct inode *inode)  in subpool_inode() argument
    240   return HUGETLBFS_SB(inode->i_sb)->spool;  in subpool_inode()
    748   void hugetlb_fix_reserve_counts(struct inode *inode)  in hugetlb_fix_reserve_counts() argument
    750   struct hugepage_subpool *spool = subpool_inode(inode);  in hugetlb_fix_reserve_counts()
    756   struct hstate *h = hstate_inode(inode);  in hugetlb_fix_reserve_counts()
    949   static inline struct resv_map *inode_resv_map(struct inode *inode)  in inode_resv_map() argument
    959   return (struct resv_map *)(&inode->i_data)->private_data;  in inode_resv_map()
    967   struct inode *inode = mapping->host;  in vma_resv_map() local
    969   return inode_resv_map(inode);  in vma_resv_map()
    4840  struct inode *inode = mapping->host;  in huge_add_to_page_cache() local
    [all …]
|
D | z3fold.c |
    172   struct inode *inode;  member
    367   pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);  in z3fold_register_migration()
    368   if (IS_ERR(pool->inode)) {  in z3fold_register_migration()
    369   pool->inode = NULL;  in z3fold_register_migration()
    373   pool->inode->i_mapping->private_data = pool;  in z3fold_register_migration()
    374   pool->inode->i_mapping->a_ops = &z3fold_aops;  in z3fold_register_migration()
    380   if (pool->inode)  in z3fold_unregister_migration()
    381   iput(pool->inode);  in z3fold_unregister_migration()
    1150  __SetPageMovable(page, pool->inode->i_mapping);  in z3fold_alloc()
    1154  __SetPageMovable(page, pool->inode->i_mapping);  in z3fold_alloc()
|
D | backing-dev.c |
    57  struct inode *inode;  in bdi_debug_stats_show() local
    61  list_for_each_entry(inode, &wb->b_dirty, i_io_list)  in bdi_debug_stats_show()
    63  list_for_each_entry(inode, &wb->b_io, i_io_list)  in bdi_debug_stats_show()
    65  list_for_each_entry(inode, &wb->b_more_io, i_io_list)  in bdi_debug_stats_show()
    67  list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)  in bdi_debug_stats_show()
    68  if (inode->i_state & I_DIRTY_TIME)  in bdi_debug_stats_show()
|
D | nommu.c |
    1754  int nommu_shrink_inode_mappings(struct inode *inode, size_t size,  in nommu_shrink_inode_mappings() argument
    1766  i_mmap_lock_read(inode->i_mapping);  in nommu_shrink_inode_mappings()
    1769  vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {  in nommu_shrink_inode_mappings()
    1773  i_mmap_unlock_read(inode->i_mapping);  in nommu_shrink_inode_mappings()
    1785  vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {  in nommu_shrink_inode_mappings()
    1800  i_mmap_unlock_read(inode->i_mapping);  in nommu_shrink_inode_mappings()
|
D | zsmalloc.c |
    270   struct inode *inode;  member
    2116  pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);  in zs_register_migration()
    2117  if (IS_ERR(pool->inode)) {  in zs_register_migration()
    2118  pool->inode = NULL;  in zs_register_migration()
    2122  pool->inode->i_mapping->private_data = pool;  in zs_register_migration()
    2123  pool->inode->i_mapping->a_ops = &zsmalloc_aops;  in zs_register_migration()
    2158  iput(pool->inode);  in zs_unregister_migration()
    2216  __SetPageMovable(page, pool->inode->i_mapping);  in SetZsPageMovable()
|
D | readahead.c |
    265  struct inode *inode = ractl->mapping->host;  in do_page_cache_ra() local
    267  loff_t isize = i_size_read(inode);  in do_page_cache_ra()
|
D | mmap.c |
    1378  static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)  in file_mmap_size_max() argument
    1380  if (S_ISREG(inode->i_mode))  in file_mmap_size_max()
    1383  if (S_ISBLK(inode->i_mode))  in file_mmap_size_max()
    1386  if (S_ISSOCK(inode->i_mode))  in file_mmap_size_max()
    1397  static inline bool file_mmap_ok(struct file *file, struct inode *inode,  in file_mmap_ok() argument
    1400  u64 maxsize = file_mmap_size_max(file, inode);  in file_mmap_ok()
    1490  struct inode *inode = file_inode(file);  in do_mmap() local
    1493  if (!file_mmap_ok(file, inode, pgoff, len))  in do_mmap()
    1523  if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))  in do_mmap()
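do_mmap() rejects a file mapping early when pgoff plus len would run past the largest offset the file type supports; file_mmap_size_max() supplies that ceiling (MAX_LFS_FILESIZE for regular and block files). A sketch of the check itself, with my_mmap_range_ok() a hypothetical name for what file_mmap_ok() computes:

#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Would a mapping of @len bytes at page offset @pgoff run past @maxsize?
 * Written to avoid overflowing the u64 arithmetic.
 */
static bool my_mmap_range_ok(u64 maxsize, unsigned long pgoff,
			     unsigned long len)
{
	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	/* maxsize == 0 wraps here, which effectively means "no limit" */
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}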
|
D | khugepaged.c |
    463  struct inode *inode = vma->vm_file->f_inode;  in hugepage_vma_check() local
    465  return !inode_is_open_for_write(inode) &&  in hugepage_vma_check()
    466  S_ISREG(inode->i_mode);  in hugepage_vma_check()
|
D | debug.c |
    114  struct inode *host;  in __dump_page()
|
D | slab_common.c |
    1188  static int slabinfo_open(struct inode *inode, struct file *file)  in slabinfo_open() argument
|
D | slub.c |
    6150  static int slab_debug_trace_open(struct inode *inode, struct file *filep)  in slab_debug_trace_open() argument
    6166  seq_release_private(inode, filep);  in slab_debug_trace_open()
    6177  seq_release_private(inode, filep);  in slab_debug_trace_open()
    6200  static int slab_debug_trace_release(struct inode *inode, struct file *file)  in slab_debug_trace_release() argument
    6206  return seq_release_private(inode, file);  in slab_debug_trace_release()
|
D | kmemleak.c |
    1715  static int kmemleak_open(struct inode *inode, struct file *file)  in kmemleak_open() argument
|
/mm/damon/ |
D | dbgfs.c |
    614  static int damon_dbgfs_open(struct inode *inode, struct file *file)  in damon_dbgfs_open() argument
    616  file->private_data = inode->i_private;  in damon_dbgfs_open()
    618  return nonseekable_open(inode, file);  in damon_dbgfs_open()
    791  struct inode *inode;  in dbgfs_rm_context() local
    807  inode = d_inode(dir);  in dbgfs_rm_context()
    808  if (!S_ISDIR(inode->i_mode)) {  in dbgfs_rm_context()
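damon_dbgfs_open() shows the stock debugfs open idiom: the cookie passed to debugfs_create_file() comes back as inode->i_private and is stashed in file->private_data for the other handlers, the same shape as slabinfo_open() and kmemleak_open() above and kfence's open_objects() below. A sketch of a hypothetical debugfs attribute wired up that way:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Illustrative per-file context; all names here are hypothetical. */
struct my_ctx {
	char msg[16];
};

static int my_open(struct inode *inode, struct file *file)
{
	/* debugfs hands back the cookie given to debugfs_create_file() */
	file->private_data = inode->i_private;
	return nonseekable_open(inode, file);
}

static ssize_t my_read(struct file *file, char __user *buf,
		       size_t count, loff_t *ppos)
{
	struct my_ctx *ctx = file->private_data;

	return simple_read_from_buffer(buf, count, ppos,
				       ctx->msg, strlen(ctx->msg));
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.open	= my_open,
	.read	= my_read,
};

/* Wire-up: debugfs_create_file("my_attr", 0400, parent, ctx, &my_fops); */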
|
/mm/kfence/ |
D | core.c |
    669  static int open_objects(struct inode *inode, struct file *file)  in open_objects() argument
|