
Lines matching refs:inode in mm/shmem.c (Linux kernel cross-reference listing; the leading number on each line is the source line number in mm/shmem.c, and the trailing "in func() argument/local" note is the enclosing function).

140 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
144 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
149 int shmem_getpage(struct inode *inode, pgoff_t index, in shmem_getpage() argument
152 return shmem_getpage_gfp(inode, index, pagep, sgp, in shmem_getpage()
153 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); in shmem_getpage()
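shmem_getpage() (prototype at line 149) is the lookup/allocate workhorse that the rest of this listing keeps calling with SGP_READ, SGP_WRITE, SGP_CACHE or SGP_FALLOC. A minimal caller, sketched from those call sites rather than copied from the file; the claim that a present page comes back locked with a reference is hedged from how shmem_file_read_iter() and shmem_write_begin() treat it.

    /*
     * Sketch only: pull one page of a shmem inode.  With SGP_READ a hole
     * leaves *pagep NULL (see shmem_undo_range(), line 972); a present
     * page is believed to come back locked with a reference, so release
     * it here.
     */
    static int peek_shmem_page(struct inode *inode, pgoff_t index)
    {
            struct page *page = NULL;
            int err = shmem_getpage(inode, index, &page, SGP_READ);

            if (err)
                    return err;
            if (page) {
                    /* ... read page contents ... */
                    unlock_page(page);
                    put_page(page);
            }
            return 0;
    }
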
213 static inline bool shmem_inode_acct_block(struct inode *inode, long pages) in shmem_inode_acct_block() argument
215 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_inode_acct_block()
216 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block()
235 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages) in shmem_inode_unacct_blocks() argument
237 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_inode_unacct_blocks()
238 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks()
360 static void shmem_recalc_inode(struct inode *inode) in shmem_recalc_inode() argument
362 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_recalc_inode()
365 freed = info->alloced - info->swapped - inode->i_mapping->nrpages; in shmem_recalc_inode()
368 inode->i_blocks -= freed * BLOCKS_PER_PAGE; in shmem_recalc_inode()
369 shmem_inode_unacct_blocks(inode, freed); in shmem_recalc_inode()
373 bool shmem_charge(struct inode *inode, long pages) in shmem_charge() argument
375 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_charge()
378 if (!shmem_inode_acct_block(inode, pages)) in shmem_charge()
382 inode->i_mapping->nrpages += pages; in shmem_charge()
386 inode->i_blocks += pages * BLOCKS_PER_PAGE; in shmem_charge()
387 shmem_recalc_inode(inode); in shmem_charge()
393 void shmem_uncharge(struct inode *inode, long pages) in shmem_uncharge() argument
395 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_uncharge()
402 inode->i_blocks -= pages * BLOCKS_PER_PAGE; in shmem_uncharge()
403 shmem_recalc_inode(inode); in shmem_uncharge()
406 shmem_inode_unacct_blocks(inode, pages); in shmem_uncharge()
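shmem_charge() and shmem_uncharge() (lines 373 and 393) are the non-static counterparts of the accounting helpers above them. A rough sketch of how a caller pairs them; insert_pages_into_mapping() is a hypothetical placeholder for whatever work sits between charge and a possible rollback, and mapping a false return to -ENOSPC is an assumption made for the example.

    /* Sketch: charge before populating the mapping, uncharge to roll back. */
    static int reserve_and_fill(struct inode *inode, long pages)
    {
            int err;

            if (!shmem_charge(inode, pages))        /* accounting refused */
                    return -ENOSPC;

            err = insert_pages_into_mapping(inode->i_mapping, pages);  /* hypothetical */
            if (err)
                    shmem_uncharge(inode, pages);   /* undo the accounting */
            return err;
    }
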
478 struct inode *inode, pgoff_t index) in shmem_is_huge() argument
490 switch (SHMEM_SB(inode->i_sb)->huge) { in shmem_is_huge()
495 i_size = round_up(i_size_read(inode), PAGE_SIZE); in shmem_is_huge()
555 struct inode *inode; in shmem_unused_huge_shrink() local
569 inode = igrab(&info->vfs_inode); in shmem_unused_huge_shrink()
572 if (!inode) { in shmem_unused_huge_shrink()
578 if (round_up(inode->i_size, PAGE_SIZE) == in shmem_unused_huge_shrink()
579 round_up(inode->i_size, HPAGE_PMD_SIZE)) { in shmem_unused_huge_shrink()
594 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
596 iput(inode); in shmem_unused_huge_shrink()
603 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
608 page = find_get_page(inode->i_mapping, in shmem_unused_huge_shrink()
609 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); in shmem_unused_huge_shrink()
655 iput(inode); in shmem_unused_huge_shrink()
683 struct inode *inode, pgoff_t index) in shmem_is_huge() argument
845 struct inode *inode = file_inode(vma->vm_file); in shmem_swap_usage() local
846 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_swap_usage()
847 struct address_space *mapping = inode->i_mapping; in shmem_swap_usage()
861 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) in shmem_swap_usage()
921 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, in shmem_undo_range() argument
924 struct address_space *mapping = inode->i_mapping; in shmem_undo_range()
925 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_undo_range()
972 shmem_getpage(inode, start - 1, &page, SGP_READ); in shmem_undo_range()
987 shmem_getpage(inode, end, &page, SGP_READ); in shmem_undo_range()
1058 shmem_recalc_inode(inode); in shmem_undo_range()
1062 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range() argument
1064 shmem_undo_range(inode, lstart, lend, false); in shmem_truncate_range()
1065 inode->i_ctime = inode->i_mtime = current_time(inode); in shmem_truncate_range()
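The two shmem_truncate_range() call sites visible further down (whole-file at line 1146, hole punch at line 2702) reduce to the two patterns below; the sketch only restates them outside their original functions.

    /* Whole-file truncation, as shmem_evict_inode() does at line 1146. */
    static void drop_all_pages(struct inode *inode)
    {
            inode->i_size = 0;
            shmem_truncate_range(inode, 0, (loff_t)-1);
    }

    /* Byte-range punch, as shmem_fallocate() does at line 2702 (inclusive end). */
    static void punch_hole(struct inode *inode, loff_t offset, loff_t len)
    {
            shmem_truncate_range(inode, offset, offset + len - 1);
    }
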
1073 struct inode *inode = path->dentry->d_inode; in shmem_getattr() local
1074 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_getattr()
1076 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { in shmem_getattr()
1078 shmem_recalc_inode(inode); in shmem_getattr()
1081 generic_fillattr(&init_user_ns, inode, stat); in shmem_getattr()
1083 if (shmem_is_huge(NULL, inode, 0)) in shmem_getattr()
1092 struct inode *inode = d_inode(dentry); in shmem_setattr() local
1093 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_setattr()
1100 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in shmem_setattr()
1101 loff_t oldsize = inode->i_size; in shmem_setattr()
1110 error = shmem_reacct_size(SHMEM_I(inode)->flags, in shmem_setattr()
1114 i_size_write(inode, newsize); in shmem_setattr()
1115 inode->i_ctime = inode->i_mtime = current_time(inode); in shmem_setattr()
1120 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1123 shmem_truncate_range(inode, in shmem_setattr()
1127 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1132 setattr_copy(&init_user_ns, inode, attr); in shmem_setattr()
1134 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode); in shmem_setattr()
1138 static void shmem_evict_inode(struct inode *inode) in shmem_evict_inode() argument
1140 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_evict_inode()
1141 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode()
1143 if (shmem_mapping(inode->i_mapping)) { in shmem_evict_inode()
1144 shmem_unacct_size(info->flags, inode->i_size); in shmem_evict_inode()
1145 inode->i_size = 0; in shmem_evict_inode()
1146 shmem_truncate_range(inode, 0, (loff_t)-1); in shmem_evict_inode()
1168 WARN_ON(inode->i_blocks); in shmem_evict_inode()
1169 shmem_free_inode(inode->i_sb); in shmem_evict_inode()
1170 clear_inode(inode); in shmem_evict_inode()
1220 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, in shmem_unuse_swap_entries() argument
1226 struct address_space *mapping = inode->i_mapping; in shmem_unuse_swap_entries()
1233 error = shmem_swapin_page(inode, indices[i], in shmem_unuse_swap_entries()
1252 static int shmem_unuse_inode(struct inode *inode, unsigned int type, in shmem_unuse_inode() argument
1255 struct address_space *mapping = inode->i_mapping; in shmem_unuse_inode()
1277 ret = shmem_unuse_swap_entries(inode, pvec, indices); in shmem_unuse_inode()
1349 struct inode *inode; in shmem_writepage() local
1369 inode = mapping->host; in shmem_writepage()
1370 info = SHMEM_I(inode); in shmem_writepage()
1400 if (inode->i_private) { in shmem_writepage()
1402 spin_lock(&inode->i_lock); in shmem_writepage()
1403 shmem_falloc = inode->i_private; in shmem_writepage()
1411 spin_unlock(&inode->i_lock); in shmem_writepage()
1440 shmem_recalc_inode(inode); in shmem_writepage()
1594 struct inode *inode, in shmem_alloc_and_acct_page() argument
1597 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_alloc_and_acct_page()
1606 if (!shmem_inode_acct_block(inode, nr)) in shmem_alloc_and_acct_page()
1620 shmem_inode_unacct_blocks(inode, nr); in shmem_alloc_and_acct_page()
1715 static int shmem_swapin_page(struct inode *inode, pgoff_t index, in shmem_swapin_page() argument
1720 struct address_space *mapping = inode->i_mapping; in shmem_swapin_page()
1721 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_swapin_page()
1781 shmem_recalc_inode(inode); in shmem_swapin_page()
1815 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, in shmem_getpage_gfp() argument
1820 struct address_space *mapping = inode->i_mapping; in shmem_getpage_gfp()
1821 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_getpage_gfp()
1835 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { in shmem_getpage_gfp()
1839 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1855 error = shmem_swapin_page(inode, index, &page, in shmem_getpage_gfp()
1897 if (S_ISLNK(inode->i_mode)) in shmem_getpage_gfp()
1899 if (!shmem_is_huge(vma, inode, index)) in shmem_getpage_gfp()
1904 page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true); in shmem_getpage_gfp()
1907 page = shmem_alloc_and_acct_page(gfp, inode, in shmem_getpage_gfp()
1950 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); in shmem_getpage_gfp()
1951 shmem_recalc_inode(inode); in shmem_getpage_gfp()
1956 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < in shmem_getpage_gfp()
1998 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { in shmem_getpage_gfp()
2003 shmem_recalc_inode(inode); in shmem_getpage_gfp()
2017 shmem_inode_unacct_blocks(inode, compound_nr(page)); in shmem_getpage_gfp()
2031 shmem_recalc_inode(inode); in shmem_getpage_gfp()
2055 struct inode *inode = file_inode(vma->vm_file); in shmem_fault() local
2056 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in shmem_fault()
2077 if (unlikely(inode->i_private)) { in shmem_fault()
2080 spin_lock(&inode->i_lock); in shmem_fault()
2081 shmem_falloc = inode->i_private; in shmem_fault()
2098 spin_unlock(&inode->i_lock); in shmem_fault()
2108 spin_lock(&inode->i_lock); in shmem_fault()
2110 spin_unlock(&inode->i_lock); in shmem_fault()
2116 spin_unlock(&inode->i_lock); in shmem_fault()
2119 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE, in shmem_fault()
2218 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy() local
2219 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); in shmem_set_policy()
2225 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy() local
2229 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); in shmem_get_policy()
2235 struct inode *inode = file_inode(file); in shmem_lock() local
2236 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_lock()
2245 if (!user_shm_lock(inode->i_size, ucounts)) in shmem_lock()
2251 user_shm_unlock(inode->i_size, ucounts); in shmem_lock()
2283 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, in shmem_get_inode()
2286 struct inode *inode; in shmem_get_inode() local
2294 inode = new_inode(sb); in shmem_get_inode()
2295 if (inode) { in shmem_get_inode()
2296 inode->i_ino = ino; in shmem_get_inode()
2297 inode_init_owner(&init_user_ns, inode, dir, mode); in shmem_get_inode()
2298 inode->i_blocks = 0; in shmem_get_inode()
2299 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); in shmem_get_inode()
2300 inode->i_generation = prandom_u32(); in shmem_get_inode()
2301 info = SHMEM_I(inode); in shmem_get_inode()
2302 memset(info, 0, (char *)inode - (char *)info); in shmem_get_inode()
2310 cache_no_acl(inode); in shmem_get_inode()
2314 inode->i_op = &shmem_special_inode_operations; in shmem_get_inode()
2315 init_special_inode(inode, mode, dev); in shmem_get_inode()
2318 inode->i_mapping->a_ops = &shmem_aops; in shmem_get_inode()
2319 inode->i_op = &shmem_inode_operations; in shmem_get_inode()
2320 inode->i_fop = &shmem_file_operations; in shmem_get_inode()
2325 inc_nlink(inode); in shmem_get_inode()
2327 inode->i_size = 2 * BOGO_DIRENT_SIZE; in shmem_get_inode()
2328 inode->i_op = &shmem_dir_inode_operations; in shmem_get_inode()
2329 inode->i_fop = &simple_dir_operations; in shmem_get_inode()
2340 lockdep_annotate_inode_mutex_key(inode); in shmem_get_inode()
2343 return inode; in shmem_get_inode()
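shmem_get_inode() returns NULL rather than an ERR_PTR when inode allocation fails, which is why every caller below tests the pointer directly (lines 2855, 2885, 3088, 3749). A sketch of the shmem_mknod()-style creation path; the -ENOSPC return matches those callers, while the dget() pin is taken from the mainline shmem_mknod() body rather than from the lines listed here.

    static int make_shmem_file(struct inode *dir, struct dentry *dentry, umode_t mode)
    {
            struct inode *inode;

            inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
            if (!inode)
                    return -ENOSPC;

            /* security/ACL initialisation would go here, as in shmem_mknod() */

            d_instantiate(dentry, inode);
            dget(dentry);           /* pin the new dentry */
            return 0;
    }
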
2355 struct inode *inode = file_inode(dst_vma->vm_file); in shmem_mfill_atomic_pte() local
2356 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_mfill_atomic_pte()
2357 struct address_space *mapping = inode->i_mapping; in shmem_mfill_atomic_pte()
2365 if (!shmem_inode_acct_block(inode, 1)) { in shmem_mfill_atomic_pte()
2415 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in shmem_mfill_atomic_pte()
2431 inode->i_blocks += BLOCKS_PER_PAGE; in shmem_mfill_atomic_pte()
2432 shmem_recalc_inode(inode); in shmem_mfill_atomic_pte()
2444 shmem_inode_unacct_blocks(inode, 1); in shmem_mfill_atomic_pte()
2454 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2464 struct inode *inode = mapping->host; in shmem_write_begin() local
2465 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_write_begin()
2474 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
2478 ret = shmem_getpage(inode, index, pagep, SGP_WRITE); in shmem_write_begin()
2498 struct inode *inode = mapping->host; in shmem_write_end() local
2500 if (pos + copied > inode->i_size) in shmem_write_end()
2501 i_size_write(inode, pos + copied); in shmem_write_end()
2532 struct inode *inode = file_inode(file); in shmem_file_read_iter() local
2533 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter()
2556 loff_t i_size = i_size_read(inode); in shmem_file_read_iter()
2567 error = shmem_getpage(inode, index, &page, sgp); in shmem_file_read_iter()
2590 i_size = i_size_read(inode); in shmem_file_read_iter()
2648 struct inode *inode = mapping->host; in shmem_file_llseek() local
2652 MAX_LFS_FILESIZE, i_size_read(inode)); in shmem_file_llseek()
2656 inode_lock(inode); in shmem_file_llseek()
2658 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); in shmem_file_llseek()
2661 inode_unlock(inode); in shmem_file_llseek()
2668 struct inode *inode = file_inode(file); in shmem_fallocate() local
2669 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate()
2670 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_fallocate()
2678 inode_lock(inode); in shmem_fallocate()
2695 spin_lock(&inode->i_lock); in shmem_fallocate()
2696 inode->i_private = &shmem_falloc; in shmem_fallocate()
2697 spin_unlock(&inode->i_lock); in shmem_fallocate()
2702 shmem_truncate_range(inode, offset, offset + len - 1); in shmem_fallocate()
2705 spin_lock(&inode->i_lock); in shmem_fallocate()
2706 inode->i_private = NULL; in shmem_fallocate()
2709 spin_unlock(&inode->i_lock); in shmem_fallocate()
2715 error = inode_newsize_ok(inode, offset + len); in shmem_fallocate()
2719 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { in shmem_fallocate()
2737 spin_lock(&inode->i_lock); in shmem_fallocate()
2738 inode->i_private = &shmem_falloc; in shmem_fallocate()
2739 spin_unlock(&inode->i_lock); in shmem_fallocate()
2762 error = shmem_getpage(inode, index, &page, SGP_FALLOC); in shmem_fallocate()
2767 shmem_undo_range(inode, in shmem_fallocate()
2808 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) in shmem_fallocate()
2809 i_size_write(inode, offset + len); in shmem_fallocate()
2810 inode->i_ctime = current_time(inode); in shmem_fallocate()
2812 spin_lock(&inode->i_lock); in shmem_fallocate()
2813 inode->i_private = NULL; in shmem_fallocate()
2814 spin_unlock(&inode->i_lock); in shmem_fallocate()
2816 inode_unlock(inode); in shmem_fallocate()
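shmem_fallocate() publishes a struct shmem_falloc through inode->i_private under inode->i_lock (lines 2695-2697 and 2737-2739) and clears it the same way when it finishes (lines 2812-2814); shmem_writepage() (line 1400) and shmem_fault() (line 2077) read it back under the same lock to detect a racing fallocate over their range. The handshake, reduced to a sketch:

    /* Publish an in-progress fallocate so writepage/fault can see it. */
    static void publish_falloc(struct inode *inode, struct shmem_falloc *falloc)
    {
            spin_lock(&inode->i_lock);
            inode->i_private = falloc;      /* readers dereference under i_lock */
            spin_unlock(&inode->i_lock);
    }

    /* Retire it again once the range has been handled. */
    static void retire_falloc(struct inode *inode)
    {
            spin_lock(&inode->i_lock);
            inode->i_private = NULL;
            spin_unlock(&inode->i_lock);
    }
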
2848 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir, in shmem_mknod()
2851 struct inode *inode; in shmem_mknod() local
2854 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); in shmem_mknod()
2855 if (inode) { in shmem_mknod()
2856 error = simple_acl_create(dir, inode); in shmem_mknod()
2859 error = security_inode_init_security(inode, dir, in shmem_mknod()
2868 d_instantiate(dentry, inode); in shmem_mknod()
2873 iput(inode); in shmem_mknod()
2878 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, in shmem_tmpfile()
2881 struct inode *inode; in shmem_tmpfile() local
2884 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); in shmem_tmpfile()
2885 if (inode) { in shmem_tmpfile()
2886 error = security_inode_init_security(inode, dir, in shmem_tmpfile()
2891 error = simple_acl_create(dir, inode); in shmem_tmpfile()
2894 d_tmpfile(dentry, inode); in shmem_tmpfile()
2898 iput(inode); in shmem_tmpfile()
2902 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir, in shmem_mkdir()
2914 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir, in shmem_create()
2923 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) in shmem_link()
2925 struct inode *inode = d_inode(old_dentry); in shmem_link() local
2935 if (inode->i_nlink) { in shmem_link()
2936 ret = shmem_reserve_inode(inode->i_sb, NULL); in shmem_link()
2942 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); in shmem_link()
2943 inc_nlink(inode); in shmem_link()
2944 ihold(inode); /* New dentry reference */ in shmem_link()
2946 d_instantiate(dentry, inode); in shmem_link()
2951 static int shmem_unlink(struct inode *dir, struct dentry *dentry) in shmem_unlink()
2953 struct inode *inode = d_inode(dentry); in shmem_unlink() local
2955 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) in shmem_unlink()
2956 shmem_free_inode(inode->i_sb); in shmem_unlink()
2959 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); in shmem_unlink()
2960 drop_nlink(inode); in shmem_unlink()
2965 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) in shmem_rmdir()
2975 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, … in shmem_exchange()
2998 struct inode *old_dir, struct dentry *old_dentry) in shmem_whiteout()
3031 struct inode *old_dir, struct dentry *old_dentry, in shmem_rename2()
3032 struct inode *new_dir, struct dentry *new_dentry, in shmem_rename2()
3035 struct inode *inode = d_inode(old_dentry); in shmem_rename2() local
3036 int they_are_dirs = S_ISDIR(inode->i_mode); in shmem_rename2()
3070 inode->i_ctime = current_time(old_dir); in shmem_rename2()
3074 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir, in shmem_symlink()
3079 struct inode *inode; in shmem_symlink() local
3086 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, in shmem_symlink()
3088 if (!inode) in shmem_symlink()
3091 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_symlink()
3094 iput(inode); in shmem_symlink()
3098 inode->i_size = len-1; in shmem_symlink()
3100 inode->i_link = kmemdup(symname, len, GFP_KERNEL); in shmem_symlink()
3101 if (!inode->i_link) { in shmem_symlink()
3102 iput(inode); in shmem_symlink()
3105 inode->i_op = &shmem_short_symlink_operations; in shmem_symlink()
3107 inode_nohighmem(inode); in shmem_symlink()
3108 error = shmem_getpage(inode, 0, &page, SGP_WRITE); in shmem_symlink()
3110 iput(inode); in shmem_symlink()
3113 inode->i_mapping->a_ops = &shmem_aops; in shmem_symlink()
3114 inode->i_op = &shmem_symlink_inode_operations; in shmem_symlink()
3123 d_instantiate(dentry, inode); in shmem_symlink()
3135 struct inode *inode, in shmem_get_link() argument
3141 page = find_get_page(inode->i_mapping, 0); in shmem_get_link()
3150 error = shmem_getpage(inode, 0, &page, SGP_READ); in shmem_get_link()
3177 static int shmem_initxattrs(struct inode *inode, in shmem_initxattrs() argument
3181 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_initxattrs()
3211 struct dentry *unused, struct inode *inode, in shmem_xattr_handler_get() argument
3214 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_xattr_handler_get()
3222 struct dentry *unused, struct inode *inode, in shmem_xattr_handler_set() argument
3226 struct shmem_inode_info *info = SHMEM_I(inode); in shmem_xattr_handler_set()
3280 static int shmem_match(struct inode *ino, void *vfh) in shmem_match()
3289 static struct dentry *shmem_find_alias(struct inode *inode) in shmem_find_alias() argument
3291 struct dentry *alias = d_find_alias(inode); in shmem_find_alias()
3293 return alias ?: d_find_any_alias(inode); in shmem_find_alias()
3300 struct inode *inode; in shmem_fh_to_dentry() local
3310 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), in shmem_fh_to_dentry()
3312 if (inode) { in shmem_fh_to_dentry()
3313 dentry = shmem_find_alias(inode); in shmem_fh_to_dentry()
3314 iput(inode); in shmem_fh_to_dentry()
3320 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, in shmem_encode_fh() argument
3321 struct inode *parent) in shmem_encode_fh()
3328 if (inode_unhashed(inode)) { in shmem_encode_fh()
3336 if (inode_unhashed(inode)) in shmem_encode_fh()
3337 __insert_inode_hash(inode, in shmem_encode_fh()
3338 inode->i_ino + inode->i_generation); in shmem_encode_fh()
3342 fh[0] = inode->i_generation; in shmem_encode_fh()
3343 fh[1] = inode->i_ino; in shmem_encode_fh()
3344 fh[2] = ((__u64)inode->i_ino) >> 32; in shmem_encode_fh()
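The export_operations lines show the file handle layout: fh[0] is i_generation, fh[1] and fh[2] hold the low and high halves of a 64-bit i_ino (lines 3342-3344), and shmem_fh_to_dentry() folds the generation into the ilookup5() hash key (line 3310). A small decoding sketch of that layout:

    /* Rebuild the 64-bit inode number from the handle words at lines 3343-3344. */
    static u64 shmem_fh_to_ino(const __u32 *fh)
    {
            u64 inum = fh[2];

            return (inum << 32) | fh[1];    /* fh[0] carries i_generation */
    }
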
3681 struct inode *inode; in shmem_fill_super() local
3748 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3749 if (!inode) in shmem_fill_super()
3751 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3752 inode->i_gid = sbinfo->gid; in shmem_fill_super()
3753 sb->s_root = d_make_root(inode); in shmem_fill_super()
3790 static struct inode *shmem_alloc_inode(struct super_block *sb) in shmem_alloc_inode()
3799 static void shmem_free_in_core_inode(struct inode *inode) in shmem_free_in_core_inode() argument
3801 if (S_ISLNK(inode->i_mode)) in shmem_free_in_core_inode()
3802 kfree(inode->i_link); in shmem_free_in_core_inode()
3803 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); in shmem_free_in_core_inode()
3806 static void shmem_destroy_inode(struct inode *inode) in shmem_destroy_inode() argument
3808 if (S_ISREG(inode->i_mode)) in shmem_destroy_inode()
3809 mpol_free_shared_policy(&SHMEM_I(inode)->policy); in shmem_destroy_inode()
4105 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) in shmem_truncate_range() argument
4107 truncate_inode_pages_range(inode->i_mapping, lstart, lend); in shmem_truncate_range()
4124 struct inode *inode; in __shmem_file_setup() local
4136 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, in __shmem_file_setup()
4138 if (unlikely(!inode)) { in __shmem_file_setup()
4142 inode->i_flags |= i_flags; in __shmem_file_setup()
4143 inode->i_size = size; in __shmem_file_setup()
4144 clear_nlink(inode); /* It is unlinked */ in __shmem_file_setup()
4145 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); in __shmem_file_setup()
4147 res = alloc_file_pseudo(inode, mnt, name, O_RDWR, in __shmem_file_setup()
4150 iput(inode); in __shmem_file_setup()
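__shmem_file_setup() is normally reached through the exported shmem_file_setup() wrapper; the (name, size, VM_* flags) signature used below is assumed from mainline rather than shown in this listing. A sketch of creating an unlinked, tmpfs-backed file from other kernel code (line 4144 shows that the inode is already unlinked):

    static struct file *make_anon_shmem_file(loff_t size)
    {
            struct file *file = shmem_file_setup("example-buffer", size, VM_NORESERVE);

            if (IS_ERR(file))
                    return file;            /* ERR_PTR, not NULL, on failure */

            /* use file->f_mapping as backing store; fput(file) when done */
            return file;
    }
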
4247 struct inode *inode = mapping->host; in shmem_read_mapping_page_gfp() local
4252 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, in shmem_read_mapping_page_gfp()
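The final entries sit inside shmem_read_mapping_page_gfp(), the driver-facing wrapper around shmem_getpage_gfp(). A sketch of how a driver reads (allocating on demand) one page of a shmem object; the signature and the unlocked, referenced return page are assumptions from mainline.

    static struct page *grab_backing_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page;

            page = shmem_read_mapping_page_gfp(mapping, index, GFP_KERNEL);
            if (IS_ERR(page))
                    return page;

            /* page is uptodate with a reference; put_page() when finished */
            return page;
    }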