Lines matching refs:sbinfo (identifier cross-reference; the number opening each entry is the source line in mm/shmem.c, and the trailing "in func() local/argument" marks where sbinfo is a local variable or a parameter)
226 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block() local
231 if (sbinfo->max_blocks) { in shmem_inode_acct_block()
232 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_inode_acct_block()
233 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
235 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
248 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks() local
250 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
251 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
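
These two fragments are tmpfs's block-accounting pair: allocating pages charges the percpu counter used_blocks against the optional max_blocks limit, and freeing subtracts the same amount. Filled out, the pair looks approximately like the sketch below; the parts not visible in the listing (the shmem_acct_block()/shmem_unacct_blocks() VM-accounting calls and the unacct unwind) follow the kernel around v5.9 and may differ in other versions:

    static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
    {
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        /* charge the per-file VM accounting first */
        if (shmem_acct_block(info->flags, pages))
            return false;

        if (sbinfo->max_blocks) {
            /* cheap approximate compare on the allocation fast path */
            if (percpu_counter_compare(&sbinfo->used_blocks,
                                       sbinfo->max_blocks - pages) > 0)
                goto unacct;
            percpu_counter_add(&sbinfo->used_blocks, pages);
        }
        return true;

    unacct:
        shmem_unacct_blocks(info->flags, pages);
        return false;
    }

    static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
    {
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (sbinfo->max_blocks)
            percpu_counter_sub(&sbinfo->used_blocks, pages);
        shmem_unacct_blocks(info->flags, pages);
    }

percpu_counter_compare() keeps the limit check cheap on every allocation: it only computes a precise sum when the counter is within the per-CPU batch error of the limit. The exact (and costlier) percpu_counter_sum() is reserved for shmem_statfs(), lines 2876-2884 below.
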
284 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_reserve_inode() local
288 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
289 if (sbinfo->max_inodes) { in shmem_reserve_inode()
290 if (!sbinfo->free_inodes) { in shmem_reserve_inode()
291 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
294 sbinfo->free_inodes--; in shmem_reserve_inode()
297 ino = sbinfo->next_ino++; in shmem_reserve_inode()
299 ino = sbinfo->next_ino++; in shmem_reserve_inode()
300 if (unlikely(!sbinfo->full_inums && in shmem_reserve_inode()
309 sbinfo->next_ino = 1; in shmem_reserve_inode()
310 ino = sbinfo->next_ino++; in shmem_reserve_inode()
314 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
329 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); in shmem_reserve_inode()
332 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
333 ino = sbinfo->next_ino; in shmem_reserve_inode()
334 sbinfo->next_ino += SHMEM_INO_BATCH; in shmem_reserve_inode()
335 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
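
Lines 284-335 show the two inode-number allocation paths in shmem_reserve_inode(). User-visible mounts take stat_lock, decrement free_inodes when a limit is set, and advance next_ino, wrapping back to 1 when a 32-bit overflow would occur and inode64 (full_inums) is off. Kernel-internal mounts, which are never limited, instead draw from a per-CPU batch (ino_batch) and only take stat_lock once every SHMEM_INO_BATCH allocations. A condensed sketch; the control flow not visible in the listing (the SB_KERNMOUNT test, is_zero_ino(), the overflow warning) is taken from the kernel around v5.9:

    static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
    {
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        ino_t ino;

        if (!(sb->s_flags & SB_KERNMOUNT)) {
            spin_lock(&sbinfo->stat_lock);
            if (sbinfo->max_inodes) {
                if (!sbinfo->free_inodes) {
                    spin_unlock(&sbinfo->stat_lock);
                    return -ENOSPC;     /* inode limit reached */
                }
                sbinfo->free_inodes--;
            }
            if (inop) {
                ino = sbinfo->next_ino++;
                if (unlikely(is_zero_ino(ino)))     /* 0 is reserved */
                    ino = sbinfo->next_ino++;
                if (unlikely(!sbinfo->full_inums && ino > UINT_MAX)) {
                    /* 32-bit inums: emulate get_next_ino() wraparound */
                    sbinfo->next_ino = 1;
                    ino = sbinfo->next_ino++;
                }
                *inop = ino;
            }
            spin_unlock(&sbinfo->stat_lock);
        } else if (inop) {
            /* kernel-internal mount: per-CPU batched allocation */
            ino_t *next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());

            ino = *next_ino;
            if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
                /* batch exhausted: refill under the lock */
                spin_lock(&sbinfo->stat_lock);
                ino = sbinfo->next_ino;
                sbinfo->next_ino += SHMEM_INO_BATCH;
                spin_unlock(&sbinfo->stat_lock);
                if (unlikely(is_zero_ino(ino)))
                    ino++;
            }
            *inop = ino;
            *next_ino = ++ino;
            put_cpu();
        }
        return 0;
    }

shmem_free_inode() (lines 349-353) is the matching release: it retakes stat_lock and increments free_inodes, but never hands inode numbers back; next_ino only moves forward.
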
349 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_free_inode() local
350 if (sbinfo->max_inodes) { in shmem_free_inode()
351 spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
352 sbinfo->free_inodes++; in shmem_free_inode()
353 spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
528 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
539 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
542 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
543 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
564 sbinfo->shrinklist_len--; in shmem_unused_huge_shrink()
568 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
628 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
629 list_move(&info->shrinklist, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
630 sbinfo->shrinklist_len++; in shmem_unused_huge_shrink()
631 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
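
shmem_unused_huge_shrink() (lines 528-631) is the consumer of sbinfo->shrinklist: inodes queued there hold a huge page whose tail extends past i_size and may be splittable. The fragments show the locking discipline rather than the split itself: entries are detached from the shared list under shrinklist_lock, the lock is dropped for the expensive split_huge_page() work, and entries that still need attention are moved back with shrinklist_len kept in step (lines 628-631). A structural sketch only, with the inode pinning and the actual page work elided:

    /* phase 1: detach a batch onto a private LIST_HEAD(list) */
    spin_lock(&sbinfo->shrinklist_lock);
    list_for_each_safe(pos, next, &sbinfo->shrinklist) {
        info = list_entry(pos, struct shmem_inode_info, shrinklist);
        /* ... pin the inode, or drop entries whose inode is dying ... */
        list_move(&info->shrinklist, &list);
        sbinfo->shrinklist_len--;
        if (!--batch)
            break;
    }
    spin_unlock(&sbinfo->shrinklist_lock);

    /* phase 2: split huge pages from 'list' with no spinlock held;
     * any entry that could not be fully handled goes back: */
    spin_lock(&sbinfo->shrinklist_lock);
    list_move(&info->shrinklist, &sbinfo->shrinklist);
    sbinfo->shrinklist_len++;
    spin_unlock(&sbinfo->shrinklist_lock);
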
642 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_scan() local
644 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
647 return shmem_unused_huge_shrink(sbinfo, sc, 0); in shmem_unused_huge_scan()
653 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_count() local
654 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
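
The two wrappers above are the superblock shrinker hooks: the count side reads shrinklist_len locklessly with READ_ONCE() (it is only a heuristic), and the scan side forwards to shmem_unused_huge_shrink(). In the kernel around v5.9 they are wired up through super_operations; the exact config guard has changed names across versions, so treat this as an assumption-laden sketch:

    static const struct super_operations shmem_ops = {
        /* ... */
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        .nr_cached_objects   = shmem_unused_huge_count,
        .free_cached_objects = shmem_unused_huge_scan,
    #endif
        /* ... */
    };
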
660 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
667 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) in is_huge_enabled() argument
670 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && in is_huge_enabled()
1099 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_setattr() local
1141 spin_lock(&sbinfo->shrinklist_lock); in shmem_setattr()
1148 &sbinfo->shrinklist); in shmem_setattr()
1149 sbinfo->shrinklist_len++; in shmem_setattr()
1151 spin_unlock(&sbinfo->shrinklist_lock); in shmem_setattr()
1165 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode() local
1172 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1175 sbinfo->shrinklist_len--; in shmem_evict_inode()
1177 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
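
shmem_setattr() and shmem_evict_inode() are the producer and cleanup sides of that same list: a truncation that leaves a huge page straddling the new i_size queues the inode (lines 1141-1151), and eviction unhooks it so the shrinker never walks into a freed inode (lines 1172-1177). Reconstructed approximately from the kernel around v5.9:

    /* shmem_setattr(): queue the inode for possible tail-splitting */
    spin_lock(&sbinfo->shrinklist_lock);
    /*
     * _careful to defend against unlocked access to
     * ->shrink_list in shmem_unused_huge_shrink()
     */
    if (list_empty_careful(&info->shrinklist)) {
        list_add_tail(&info->shrinklist, &sbinfo->shrinklist);
        sbinfo->shrinklist_len++;
    }
    spin_unlock(&sbinfo->shrinklist_lock);

    /* shmem_evict_inode(): take the dying inode off the list */
    spin_lock(&sbinfo->shrinklist_lock);
    if (!list_empty(&info->shrinklist)) {
        list_del_init(&info->shrinklist);
        sbinfo->shrinklist_len--;
    }
    spin_unlock(&sbinfo->shrinklist_lock);
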
1491 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
1494 if (sbinfo->mpol) { in shmem_get_sbmpol()
1495 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1496 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1498 spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1506 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
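
shmem_get_sbmpol() hands out a reference-counted copy of the superblock's NUMA memory policy; the elided line 1497 takes the reference. stat_lock is held across the load-and-get so a concurrent remount, which swaps sbinfo->mpol under the same lock (lines 3612-3613 below), cannot free the policy between the two steps. Reconstructed, assuming the kernel's mpol_get() helper:

    static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
    {
        struct mempolicy *mpol = NULL;

        if (sbinfo->mpol) {
            spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
            mpol = sbinfo->mpol;
            mpol_get(mpol);                 /* elided line 1497 */
            spin_unlock(&sbinfo->stat_lock);
        }
        return mpol;
    }

The !CONFIG_NUMA stub at line 1506 simply returns NULL.
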
1817 struct shmem_sb_info *sbinfo; in shmem_getpage_gfp() local
1836 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1894 switch (sbinfo->huge) { in shmem_getpage_gfp()
1937 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); in shmem_getpage_gfp()
1975 spin_lock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
1982 &sbinfo->shrinklist); in shmem_getpage_gfp()
1983 sbinfo->shrinklist_len++; in shmem_getpage_gfp()
1985 spin_unlock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
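
Within shmem_getpage_gfp(), sbinfo drives three huge-page decisions visible in the fragments: the switch on sbinfo->huge (line 1894) picks huge versus normal allocation; a failed huge allocation calls shmem_unused_huge_shrink(sbinfo, NULL, 1) to split one queued huge page and retry (line 1937); and a successful huge allocation reaching past i_size parks the inode on the shrinklist (lines 1975-1985) with the same guarded insert shown for shmem_setattr(). The policy switch, sketched from the kernel around v5.9:

    switch (sbinfo->huge) {
    case SHMEM_HUGE_NEVER:
        goto alloc_nohuge;
    case SHMEM_HUGE_WITHIN_SIZE: {
        loff_t i_size;
        pgoff_t off;

        /* huge only if the whole huge-aligned extent fits in i_size */
        off = round_up(index, HPAGE_PMD_NR);
        i_size = round_up(i_size_read(inode), PAGE_SIZE);
        if (i_size >= HPAGE_PMD_SIZE && i_size >> PAGE_SHIFT >= off)
            goto alloc_huge;

        fallthrough;
    }
    case SHMEM_HUGE_ADVISE:
        /* honour madvise(MADV_HUGEPAGE) on the mapping */
        if (sgp_huge == SGP_HUGE)
            goto alloc_huge;
        goto alloc_nohuge;
    }
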
2310 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_get_inode() local
2344 shmem_get_sbmpol(sbinfo)); in shmem_get_inode()
2741 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate() local
2799 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
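
The fallocate check at line 2799 is a cheap up-front rejection: if the filesystem has a block limit at all, a request spanning more pages than max_blocks can never succeed, so it fails with ENOSPC before instantiating any pages (avoiding a pointless "swapstorm", as the kernel comment puts it). With start/end being page indexes of the requested range, approximately:

    start = offset >> PAGE_SHIFT;
    end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    /* Try to avoid a swapstorm if len is impossible to satisfy */
    if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
        error = -ENOSPC;
        goto out;
    }
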
2871 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs() local
2876 if (sbinfo->max_blocks) { in shmem_statfs()
2877 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
2879 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
2880 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
2882 if (sbinfo->max_inodes) { in shmem_statfs()
2883 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
2884 buf->f_ffree = sbinfo->free_inodes; in shmem_statfs()
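
shmem_statfs() reports limits only when they are configured; an unlimited tmpfs leaves f_blocks/f_files at zero, meaning "undefined". Note it pays for the exact percpu_counter_sum() here, where the allocation path (line 232 above) used the cheaper percpu_counter_compare(). Filled out, approximately per the kernel around v5.9:

    static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
    {
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

        buf->f_type = TMPFS_MAGIC;
        buf->f_bsize = PAGE_SIZE;
        buf->f_namelen = NAME_MAX;
        if (sbinfo->max_blocks) {
            buf->f_blocks = sbinfo->max_blocks;
            buf->f_bavail =
            buf->f_bfree  = sbinfo->max_blocks -
                            percpu_counter_sum(&sbinfo->used_blocks);
        }
        if (sbinfo->max_inodes) {
            buf->f_files = sbinfo->max_inodes;
            buf->f_ffree = sbinfo->free_inodes;
        }
        /* unlimited fields are left at 0 */
        return 0;
    }
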
3563 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); in shmem_reconfigure() local
3567 spin_lock(&sbinfo->stat_lock); in shmem_reconfigure()
3568 inodes = sbinfo->max_inodes - sbinfo->free_inodes; in shmem_reconfigure()
3570 if (!sbinfo->max_blocks) { in shmem_reconfigure()
3574 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_reconfigure()
3581 if (!sbinfo->max_inodes) { in shmem_reconfigure()
3592 sbinfo->next_ino > UINT_MAX) { in shmem_reconfigure()
3598 sbinfo->huge = ctx->huge; in shmem_reconfigure()
3600 sbinfo->full_inums = ctx->full_inums; in shmem_reconfigure()
3602 sbinfo->max_blocks = ctx->blocks; in shmem_reconfigure()
3604 sbinfo->max_inodes = ctx->inodes; in shmem_reconfigure()
3605 sbinfo->free_inodes = ctx->inodes - inodes; in shmem_reconfigure()
3612 mpol_put(sbinfo->mpol); in shmem_reconfigure()
3613 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ in shmem_reconfigure()
3616 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
3619 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
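
shmem_reconfigure() applies remount-time changes atomically under stat_lock. The fragments show the validation order: inodes in use are derived as max_inodes - free_inodes (line 3568); a limit cannot be introduced on a previously unlimited filesystem, nor lowered below current use (lines 3570-3581); and switching back to 32-bit inode numbers is refused once next_ino has passed UINT_MAX (line 3592). Only after every check passes are the new values committed (lines 3598-3605), with free_inodes recomputed so in-use inodes stay accounted. A condensed sketch; field and message details follow the kernel around v5.9:

    static int shmem_reconfigure(struct fs_context *fc)
    {
        struct shmem_options *ctx = fc->fs_private;
        struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
        unsigned long inodes;
        const char *err;

        spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;  /* in use */

        if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
            if (!sbinfo->max_blocks) {
                err = "Cannot retroactively limit size";
                goto out;
            }
            if (percpu_counter_compare(&sbinfo->used_blocks,
                                       ctx->blocks) > 0) {
                err = "Too small a size for current use";
                goto out;
            }
        }
        if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
            if (!sbinfo->max_inodes) {
                err = "Cannot retroactively limit inodes";
                goto out;
            }
            if (ctx->inodes < inodes) {
                err = "Too few inodes for current use";
                goto out;
            }
        }
        if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
            sbinfo->next_ino > UINT_MAX) {
            err = "Current inum too high to switch to 32-bit inums";
            goto out;
        }

        /* everything validated: commit the new configuration */
        if (ctx->seen & SHMEM_SEEN_HUGE)
            sbinfo->huge = ctx->huge;
        if (ctx->seen & SHMEM_SEEN_INUMS)
            sbinfo->full_inums = ctx->full_inums;
        if (ctx->seen & SHMEM_SEEN_BLOCKS)
            sbinfo->max_blocks = ctx->blocks;
        if (ctx->seen & SHMEM_SEEN_INODES) {
            sbinfo->max_inodes = ctx->inodes;
            sbinfo->free_inodes = ctx->inodes - inodes;
        }
        if (ctx->mpol) {
            mpol_put(sbinfo->mpol);
            sbinfo->mpol = ctx->mpol;   /* transfers initial ref */
            ctx->mpol = NULL;
        }
        spin_unlock(&sbinfo->stat_lock);
        return 0;
    out:
        spin_unlock(&sbinfo->stat_lock);
        return invalfc(fc, "%s", err);
    }
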
3625 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options() local
3627 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
3629 sbinfo->max_blocks << (PAGE_SHIFT - 10)); in shmem_show_options()
3630 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
3631 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
3632 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
3633 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
3634 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
3636 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
3637 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
3639 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
3661 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) in shmem_show_options()
3662 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); in shmem_show_options()
3665 if (sbinfo->huge) in shmem_show_options()
3666 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
3668 shmem_show_mpol(seq, sbinfo->mpol); in shmem_show_options()
3676 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_put_super() local
3678 free_percpu(sbinfo->ino_batch); in shmem_put_super()
3679 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
3680 mpol_put(sbinfo->mpol); in shmem_put_super()
3681 kfree(sbinfo); in shmem_put_super()
3689 struct shmem_sb_info *sbinfo; in shmem_fill_super() local
3693 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), in shmem_fill_super()
3695 if (!sbinfo) in shmem_fill_super()
3698 sb->s_fs_info = sbinfo; in shmem_fill_super()
3721 sbinfo->max_blocks = ctx->blocks; in shmem_fill_super()
3722 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; in shmem_fill_super()
3724 sbinfo->ino_batch = alloc_percpu(ino_t); in shmem_fill_super()
3725 if (!sbinfo->ino_batch) in shmem_fill_super()
3728 sbinfo->uid = ctx->uid; in shmem_fill_super()
3729 sbinfo->gid = ctx->gid; in shmem_fill_super()
3730 sbinfo->full_inums = ctx->full_inums; in shmem_fill_super()
3731 sbinfo->mode = ctx->mode; in shmem_fill_super()
3732 sbinfo->huge = ctx->huge; in shmem_fill_super()
3733 sbinfo->mpol = ctx->mpol; in shmem_fill_super()
3736 spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
3737 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
3739 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
3740 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
3756 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3759 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3760 inode->i_gid = sbinfo->gid; in shmem_fill_super()
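
shmem_fill_super() is where all of the state referenced above gets initialized, and the ordering matters: limits and identity are copied out of the fs_context first (SB_KERNMOUNT leaves the limits at zero, i.e. unlimited); ino_batch is only allocated for kernel-internal mounts, matching the batched path in shmem_reserve_inode(); then stat_lock, the used_blocks percpu counter, and the shrinklist are set up before the root inode is created with the superblock's mode/uid/gid. Condensed, with elided steps marked, per the kernel around v5.9:

    /* inside shmem_fill_super(), assuming struct shmem_options *ctx: */
    sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                         L1_CACHE_BYTES), GFP_KERNEL);
    if (!sbinfo)
        return -ENOMEM;
    sb->s_fs_info = sbinfo;

    /* ... apply tmpfs defaults for user mounts ... */

    sbinfo->max_blocks = ctx->blocks;
    sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
    if (sb->s_flags & SB_KERNMOUNT) {
        /* batched inode numbers are only used for internal mounts */
        sbinfo->ino_batch = alloc_percpu(ino_t);
        if (!sbinfo->ino_batch)
            goto failed;
    }
    sbinfo->uid = ctx->uid;
    sbinfo->gid = ctx->gid;
    sbinfo->full_inums = ctx->full_inums;
    sbinfo->mode = ctx->mode;
    sbinfo->huge = ctx->huge;
    sbinfo->mpol = ctx->mpol;   /* ref moves to the superblock */
    ctx->mpol = NULL;

    spin_lock_init(&sbinfo->stat_lock);
    if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
        goto failed;
    spin_lock_init(&sbinfo->shrinklist_lock);
    INIT_LIST_HEAD(&sbinfo->shrinklist);

    /* ... fill in the generic sb->s_* fields ... */

    inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0,
                            VM_NORESERVE);
    if (!inode)
        goto failed;
    inode->i_uid = sbinfo->uid;
    inode->i_gid = sbinfo->gid;

shmem_put_super() (lines 3676-3681 above) tears the same state down in the obvious order: free the per-CPU batch, destroy the used_blocks counter, drop the mpol reference, and free sbinfo.
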
4056 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_huge_enabled() local
4066 switch (sbinfo->huge) { in shmem_huge_enabled()
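
Both is_huge_enabled() (lines 667-670) and shmem_huge_enabled() switch on the same per-superblock policy. For reference, the SHMEM_HUGE_* constants being tested are defined near the top of mm/shmem.c; values per the kernel around v5.9:

    /* mount-option values for sbinfo->huge */
    #define SHMEM_HUGE_NEVER        0   /* never allocate huge pages */
    #define SHMEM_HUGE_ALWAYS       1   /* always try a huge page first */
    #define SHMEM_HUGE_WITHIN_SIZE  2   /* huge only if it fits in i_size */
    #define SHMEM_HUGE_ADVISE       3   /* only on madvise(MADV_HUGEPAGE) */

    /* sysfs-only special values, never set as a mount option */
    #define SHMEM_HUGE_DENY         (-1)    /* forbid huge pages everywhere */
    #define SHMEM_HUGE_FORCE        (-2)    /* force huge pages everywhere */

This is why line 670 reads as it does: with SHMEM_HUGE_NEVER being zero, is_huge_enabled() is true when the global shmem_huge override forces huge pages or when the mount set any non-NEVER policy.
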