Lines Matching refs:sbinfo
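
Every match below is against sbinfo, the struct shmem_sb_info that SHMEM_SB() returns for a shmem/tmpfs superblock. The matches group by function: block accounting, inode accounting, the huge-page shrinklist, the superblock mempolicy, statfs, remount, and superblock setup/teardown. After several of the groups, a short userspace sketch (simplified and hedged, not the kernel code) illustrates the pattern the listed fragments show.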

218 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block() local
223 if (sbinfo->max_blocks) { in shmem_inode_acct_block()
224 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_inode_acct_block()
225 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
227 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
240 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks() local
242 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
243 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
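
shmem_inode_acct_block() and shmem_inode_unacct_blocks() above only consult sbinfo when a size= limit is configured (sbinfo->max_blocks != 0): a charge of `pages` blocks is refused if it would push used_blocks past max_blocks, and the uncharge path subtracts symmetrically. A minimal userspace sketch of that charge/uncharge pattern, assuming a mutex-protected plain counter in place of the kernel's percpu_counter (struct and function names here are illustrative):

/*
 * Hedged sketch, not the kernel code: a simplified model of the
 * block-accounting pattern shown in the fragments above.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sb_accounting {
        unsigned long max_blocks;   /* 0 means "no size= limit configured" */
        unsigned long used_blocks;
        pthread_mutex_t lock;
};

static bool acct_blocks(struct sb_accounting *sb, unsigned long pages)
{
        bool ok = true;

        if (!sb->max_blocks)
                return true;                /* unlimited: nothing to account */

        pthread_mutex_lock(&sb->lock);
        if (sb->used_blocks + pages > sb->max_blocks)
                ok = false;                 /* would exceed the limit */
        else
                sb->used_blocks += pages;
        pthread_mutex_unlock(&sb->lock);
        return ok;
}

static void unacct_blocks(struct sb_accounting *sb, unsigned long pages)
{
        if (!sb->max_blocks)
                return;
        pthread_mutex_lock(&sb->lock);
        sb->used_blocks -= pages;
        pthread_mutex_unlock(&sb->lock);
}

int main(void)
{
        struct sb_accounting sb = { .max_blocks = 4,
                                    .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("charge 3: %s\n", acct_blocks(&sb, 3) ? "ok" : "ENOSPC");
        printf("charge 2: %s\n", acct_blocks(&sb, 2) ? "ok" : "ENOSPC");
        unacct_blocks(&sb, 3);
        printf("charge 2: %s\n", acct_blocks(&sb, 2) ? "ok" : "ENOSPC");
        return 0;
}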
266 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_reserve_inode() local
267 if (sbinfo->max_inodes) { in shmem_reserve_inode()
268 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
269 if (!sbinfo->free_inodes) { in shmem_reserve_inode()
270 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
273 sbinfo->free_inodes--; in shmem_reserve_inode()
274 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
281 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_free_inode() local
282 if (sbinfo->max_inodes) { in shmem_free_inode()
283 spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
284 sbinfo->free_inodes++; in shmem_free_inode()
285 spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
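
shmem_reserve_inode() and shmem_free_inode() above keep a free-inode count under sbinfo->stat_lock when an nr_inodes limit exists, failing reservation when free_inodes reaches zero and handing the slot back on free. A simplified userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock (names are illustrative):

/*
 * Hedged sketch: the inode bookkeeping pattern from shmem_reserve_inode()
 * and shmem_free_inode(), modelled in userspace.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct sb_inodes {
        unsigned long max_inodes;   /* 0 means "no nr_inodes limit" */
        unsigned long free_inodes;
        pthread_mutex_t stat_lock;  /* stands in for sbinfo->stat_lock */
};

static int reserve_inode(struct sb_inodes *sb)
{
        if (!sb->max_inodes)
                return 0;
        pthread_mutex_lock(&sb->stat_lock);
        if (!sb->free_inodes) {
                pthread_mutex_unlock(&sb->stat_lock);
                return -ENOSPC;             /* filesystem is out of inodes */
        }
        sb->free_inodes--;
        pthread_mutex_unlock(&sb->stat_lock);
        return 0;
}

static void free_inode(struct sb_inodes *sb)
{
        if (!sb->max_inodes)
                return;
        pthread_mutex_lock(&sb->stat_lock);
        sb->free_inodes++;
        pthread_mutex_unlock(&sb->stat_lock);
}

int main(void)
{
        struct sb_inodes sb = { .max_inodes = 1, .free_inodes = 1,
                                .stat_lock = PTHREAD_MUTEX_INITIALIZER };

        printf("reserve #1 -> %d\n", reserve_inode(&sb));   /* 0 */
        printf("reserve #2 -> %d\n", reserve_inode(&sb));   /* -ENOSPC */
        free_inode(&sb);
        printf("reserve #3 -> %d\n", reserve_inode(&sb));   /* 0 */
        return 0;
}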
460 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
471 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
474 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
475 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
501 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
558 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
559 list_splice_tail(&list, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
560 sbinfo->shrinklist_len -= removed; in shmem_unused_huge_shrink()
561 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
569 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_scan() local
571 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
574 return shmem_unused_huge_shrink(sbinfo, sc, 0); in shmem_unused_huge_scan()
580 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_count() local
581 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
587 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
594 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) in is_huge_enabled() argument
597 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && in is_huge_enabled()
1021 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_setattr() local
1063 spin_lock(&sbinfo->shrinklist_lock); in shmem_setattr()
1070 &sbinfo->shrinklist); in shmem_setattr()
1071 sbinfo->shrinklist_len++; in shmem_setattr()
1073 spin_unlock(&sbinfo->shrinklist_lock); in shmem_setattr()
1087 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode() local
1094 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1097 sbinfo->shrinklist_len--; in shmem_evict_inode()
1099 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
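
The shrinklist references above show the lifecycle of the per-superblock list of inodes whose huge-page tails may be worth splitting: shmem_setattr() queues an inode on sbinfo->shrinklist under sbinfo->shrinklist_lock and bumps shrinklist_len, shmem_evict_inode() takes it off again, shmem_getpage_gfp() further down queues in the same way, and shmem_unused_huge_scan()/count() read shrinklist_len to drive shmem_unused_huge_shrink(). A simplified userspace sketch of a locked list with a mirrored length counter (the kernel uses list_head and a spinlock; everything here is illustrative):

/*
 * Hedged sketch: a lock-protected list whose length is tracked alongside it,
 * mirroring the sbinfo->shrinklist / shrinklist_len pattern.
 */
#include <pthread.h>
#include <stdio.h>

struct shrink_node {
        int id;                         /* stands in for the inode */
        struct shrink_node *next;
};

struct sb_shrink {
        struct shrink_node *head;       /* sbinfo->shrinklist */
        unsigned long len;              /* sbinfo->shrinklist_len */
        pthread_mutex_t lock;           /* sbinfo->shrinklist_lock */
};

static void shrinklist_add(struct sb_shrink *sb, struct shrink_node *n)
{
        pthread_mutex_lock(&sb->lock);
        n->next = sb->head;
        sb->head = n;
        sb->len++;                      /* counted while the lock is held */
        pthread_mutex_unlock(&sb->lock);
}

static void shrinklist_del(struct sb_shrink *sb, struct shrink_node *n)
{
        struct shrink_node **pp;

        pthread_mutex_lock(&sb->lock);
        for (pp = &sb->head; *pp; pp = &(*pp)->next) {
                if (*pp == n) {
                        *pp = n->next;
                        sb->len--;      /* eviction drops the entry */
                        break;
                }
        }
        pthread_mutex_unlock(&sb->lock);
}

int main(void)
{
        struct sb_shrink sb = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct shrink_node a = { .id = 1 }, b = { .id = 2 };

        shrinklist_add(&sb, &a);
        shrinklist_add(&sb, &b);
        shrinklist_del(&sb, &a);
        printf("shrinklist_len = %lu\n", sb.len);   /* 1 */
        return 0;
}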
1410 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
1413 if (sbinfo->mpol) { in shmem_get_sbmpol()
1414 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1415 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1417 spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1425 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
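
shmem_get_sbmpol() above snapshots sbinfo->mpol under sbinfo->stat_lock, the same lock shmem_reconfigure() holds further down when it replaces the policy, and takes a reference before dropping the lock so the policy cannot be freed underneath the caller. A simplified userspace sketch of that snapshot-under-lock pattern, with an explicit refcount standing in for mpol_get()/mpol_put() (names are illustrative):

/*
 * Hedged sketch: read a shared pointer under the same lock its writer uses,
 * and pin it with a reference before dropping the lock.
 */
#include <pthread.h>
#include <stdio.h>

struct policy {
        int refcount;
        int mode;                       /* stands in for the NUMA policy */
};

struct sb_mpol {
        struct policy *mpol;            /* may be swapped at "remount" */
        pthread_mutex_t stat_lock;
};

static struct policy *get_sbmpol(struct sb_mpol *sb)
{
        struct policy *p = NULL;

        if (sb->mpol) {
                pthread_mutex_lock(&sb->stat_lock); /* prevent replace/use races */
                p = sb->mpol;
                if (p)
                        p->refcount++;              /* keep it alive after unlock */
                pthread_mutex_unlock(&sb->stat_lock);
        }
        return p;
}

int main(void)
{
        struct policy pol = { .refcount = 1, .mode = 42 };
        struct sb_mpol sb = { .mpol = &pol,
                              .stat_lock = PTHREAD_MUTEX_INITIALIZER };
        struct policy *p = get_sbmpol(&sb);

        printf("mode=%d refcount=%d\n", p->mode, p->refcount);   /* 42, 2 */
        return 0;
}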
1747 struct shmem_sb_info *sbinfo; in shmem_getpage_gfp() local
1767 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1814 switch (sbinfo->huge) { in shmem_getpage_gfp()
1854 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); in shmem_getpage_gfp()
1900 spin_lock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
1907 &sbinfo->shrinklist); in shmem_getpage_gfp()
1908 sbinfo->shrinklist_len++; in shmem_getpage_gfp()
1910 spin_unlock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
2243 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_get_inode() local
2276 shmem_get_sbmpol(sbinfo)); in shmem_get_inode()
2726 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate() local
2784 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
2856 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs() local
2861 if (sbinfo->max_blocks) { in shmem_statfs()
2862 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
2864 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
2865 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
2867 if (sbinfo->max_inodes) { in shmem_statfs()
2868 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
2869 buf->f_ffree = sbinfo->free_inodes; in shmem_statfs()
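
shmem_statfs() above only fills in the block and inode figures when the corresponding limit is configured: the total is the limit itself and the free figure is the limit minus current usage (the kernel sums the percpu used_blocks counter for this). A simplified userspace sketch (types and names are illustrative):

/*
 * Hedged sketch: derive statfs-style totals from the sbinfo-style limits,
 * leaving the fields alone when no limit is set.
 */
#include <stdio.h>

struct sb_limits {
        unsigned long max_blocks, used_blocks;
        unsigned long max_inodes, free_inodes;
};

struct fake_statfs {
        unsigned long f_blocks, f_bfree;
        unsigned long f_files, f_ffree;
};

static void fill_statfs(const struct sb_limits *sb, struct fake_statfs *buf)
{
        if (sb->max_blocks) {
                buf->f_blocks = sb->max_blocks;
                buf->f_bfree  = sb->max_blocks - sb->used_blocks;
        }
        if (sb->max_inodes) {
                buf->f_files = sb->max_inodes;
                buf->f_ffree = sb->free_inodes;
        }
}

int main(void)
{
        struct sb_limits sb = { .max_blocks = 1024, .used_blocks = 100,
                                .max_inodes = 64,   .free_inodes = 60 };
        struct fake_statfs buf = { 0 };

        fill_statfs(&sb, &buf);
        printf("blocks %lu/%lu free, inodes %lu/%lu free\n",
               buf.f_bfree, buf.f_blocks, buf.f_ffree, buf.f_files);
        return 0;
}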
3541 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); in shmem_reconfigure() local
3545 spin_lock(&sbinfo->stat_lock); in shmem_reconfigure()
3546 inodes = sbinfo->max_inodes - sbinfo->free_inodes; in shmem_reconfigure()
3548 if (!sbinfo->max_blocks) { in shmem_reconfigure()
3552 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_reconfigure()
3559 if (!sbinfo->max_inodes) { in shmem_reconfigure()
3570 sbinfo->huge = ctx->huge; in shmem_reconfigure()
3572 sbinfo->max_blocks = ctx->blocks; in shmem_reconfigure()
3574 sbinfo->max_inodes = ctx->inodes; in shmem_reconfigure()
3575 sbinfo->free_inodes = ctx->inodes - inodes; in shmem_reconfigure()
3582 mpol_put(sbinfo->mpol); in shmem_reconfigure()
3583 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ in shmem_reconfigure()
3586 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
3589 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
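
shmem_reconfigure() above does its remount work under sbinfo->stat_lock: it derives the inodes currently in use as max_inodes - free_inodes, rejects new limits that fall below current usage (or any attempt to impose a limit on a filesystem mounted without one), and only then commits the new max_blocks/max_inodes/free_inodes and swaps in the new mempolicy. A simplified userspace sketch of the check-then-commit step (error handling is condensed and the names are illustrative):

/*
 * Hedged sketch: validate new limits against current usage under one lock,
 * and commit them only if every check passes.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct sb_cfg {
        unsigned long max_blocks, used_blocks;
        unsigned long max_inodes, free_inodes;
        pthread_mutex_t stat_lock;
};

static int reconfigure(struct sb_cfg *sb, unsigned long new_blocks,
                       unsigned long new_inodes)
{
        unsigned long inodes_in_use;
        int err = 0;

        pthread_mutex_lock(&sb->stat_lock);
        inodes_in_use = sb->max_inodes - sb->free_inodes;

        if (!sb->max_blocks || !sb->max_inodes)
                err = -EINVAL;          /* can't retroactively limit an unlimited fs */
        else if (new_blocks < sb->used_blocks)
                err = -EINVAL;          /* smaller than current usage */
        else if (new_inodes < inodes_in_use)
                err = -EINVAL;
        if (!err) {
                sb->max_blocks  = new_blocks;
                sb->max_inodes  = new_inodes;
                sb->free_inodes = new_inodes - inodes_in_use;
        }
        pthread_mutex_unlock(&sb->stat_lock);
        return err;
}

int main(void)
{
        struct sb_cfg sb = { .max_blocks = 100, .used_blocks = 40,
                             .max_inodes = 16,  .free_inodes = 10,
                             .stat_lock = PTHREAD_MUTEX_INITIALIZER };

        printf("shrink below usage -> %d\n", reconfigure(&sb, 30, 16));  /* -EINVAL */
        printf("grow               -> %d\n", reconfigure(&sb, 200, 32)); /* 0 */
        return 0;
}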
3595 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options() local
3597 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
3599 sbinfo->max_blocks << (PAGE_SHIFT - 10)); in shmem_show_options()
3600 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
3601 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
3602 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
3603 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
3604 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
3606 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
3607 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
3609 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
3612 if (sbinfo->huge) in shmem_show_options()
3613 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
3615 shmem_show_mpol(seq, sbinfo->mpol); in shmem_show_options()
3623 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_put_super() local
3625 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
3626 mpol_put(sbinfo->mpol); in shmem_put_super()
3627 kfree(sbinfo); in shmem_put_super()
3635 struct shmem_sb_info *sbinfo; in shmem_fill_super() local
3639 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), in shmem_fill_super()
3641 if (!sbinfo) in shmem_fill_super()
3644 sb->s_fs_info = sbinfo; in shmem_fill_super()
3665 sbinfo->max_blocks = ctx->blocks; in shmem_fill_super()
3666 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; in shmem_fill_super()
3667 sbinfo->uid = ctx->uid; in shmem_fill_super()
3668 sbinfo->gid = ctx->gid; in shmem_fill_super()
3669 sbinfo->mode = ctx->mode; in shmem_fill_super()
3670 sbinfo->huge = ctx->huge; in shmem_fill_super()
3671 sbinfo->mpol = ctx->mpol; in shmem_fill_super()
3674 spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
3675 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
3677 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
3678 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
3694 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3697 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3698 inode->i_gid = sbinfo->gid; in shmem_fill_super()
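
shmem_fill_super() above allocates a zeroed shmem_sb_info, hangs it off sb->s_fs_info, copies the parsed mount options into it (max_blocks, inode counts, uid/gid/mode, huge, mpol), initialises stat_lock, the used_blocks percpu counter and the shrinklist, and finally creates the root inode using sbinfo->mode/uid/gid. A simplified userspace sketch of that setup order (field and function names are illustrative, not the kernel API):

/*
 * Hedged sketch: allocate a zeroed per-superblock info structure, copy the
 * mount options in, then initialise locks and accounting state.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mount_opts {
        unsigned long blocks, inodes;
        unsigned int mode, uid, gid;
};

struct sb_info {
        unsigned long max_blocks;
        unsigned long max_inodes, free_inodes;
        unsigned int mode, uid, gid;
        unsigned long used_blocks;              /* percpu_counter in the kernel */
        unsigned long shrinklist_len;
        pthread_mutex_t stat_lock, shrinklist_lock;
};

static struct sb_info *fill_super(const struct mount_opts *ctx)
{
        struct sb_info *sbinfo = calloc(1, sizeof(*sbinfo));

        if (!sbinfo)
                return NULL;
        /* copy the parsed mount options into the per-superblock info */
        sbinfo->max_blocks  = ctx->blocks;
        sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
        sbinfo->mode = ctx->mode;
        sbinfo->uid  = ctx->uid;
        sbinfo->gid  = ctx->gid;
        /* then initialise the locks and accounting state */
        pthread_mutex_init(&sbinfo->stat_lock, NULL);
        pthread_mutex_init(&sbinfo->shrinklist_lock, NULL);
        return sbinfo;
}

int main(void)
{
        struct mount_opts ctx = { .blocks = 1024, .inodes = 64, .mode = 01777 };
        struct sb_info *sbinfo = fill_super(&ctx);

        if (sbinfo)
                printf("max_blocks=%lu free_inodes=%lu mode=%o\n",
                       sbinfo->max_blocks, sbinfo->free_inodes, sbinfo->mode);
        free(sbinfo);
        return 0;
}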
3991 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_huge_enabled() local
4002 switch (sbinfo->huge) { in shmem_huge_enabled()