Lines Matching +full:not +full:- +full:swapped
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
9 * Copyright (C) 2002-2011 Hugh Dickins.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
18 * tiny-shmem:
60 #include <linux/backing-dev.h>
99 * inode->i_private (with i_mutex making sure that it has only one user at
100 * a time): we would prefer not to enlarge the shmem inode just for that.
136 return min(nr_pages - totalhigh_pages(), nr_pages / 2); in shmem_default_max_inodes()
156 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); in shmem_getpage()
161 return sb->s_fs_info; in SHMEM_SB()
165 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
168 * consistent with the pre-accounting of private mappings ...
173 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); in shmem_acct_size()
187 return security_vm_enough_memory_mm(current->mm, in shmem_reacct_size()
188 VM_ACCT(newsize) - VM_ACCT(oldsize)); in shmem_reacct_size()
190 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize)); in shmem_reacct_size()
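The fragments above pre-account a tmpfs object's size against the commit limit and, on resize, account only the delta. A minimal userspace sketch of that delta re-accounting, with VM_ACCT() modelled as a round-up to whole pages and the error handling of security_vm_enough_memory_mm() omitted (all names below are illustrative, not the kernel API):

/* Sketch of delta re-accounting on resize; not the kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define VM_ACCT(size) (((size) + PAGE_SIZE - 1) / PAGE_SIZE)	/* bytes -> pages, rounded up */

static long committed;	/* stands in for the system-wide committed-memory counter */

static void reacct_size(unsigned long oldsize, unsigned long newsize)
{
	if (VM_ACCT(newsize) > VM_ACCT(oldsize))
		committed += VM_ACCT(newsize) - VM_ACCT(oldsize);	/* charge the growth */
	else
		committed -= VM_ACCT(oldsize) - VM_ACCT(newsize);	/* refund the shrink */
}

int main(void)
{
	reacct_size(0, 3 * PAGE_SIZE + 1);		/* charges 4 pages */
	reacct_size(3 * PAGE_SIZE + 1, PAGE_SIZE);	/* refunds 3 pages */
	printf("committed pages: %ld\n", committed);	/* prints 1 */
	return 0;
}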
198 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
199 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
206 return security_vm_enough_memory_mm(current->mm, in shmem_acct_block()
219 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block()
221 if (shmem_acct_block(info->flags, pages)) in shmem_inode_acct_block()
224 if (sbinfo->max_blocks) { in shmem_inode_acct_block()
225 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_inode_acct_block()
226 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
228 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
234 shmem_unacct_blocks(info->flags, pages); in shmem_inode_acct_block()
241 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks()
243 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
244 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
245 shmem_unacct_blocks(info->flags, pages); in shmem_inode_unacct_blocks()
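Block accounting follows the same pattern but against the mount's own limit, and it deliberately reports -ENOSPC rather than -ENOMEM so that a fault on a sparse tmpfs mapping ends in SIGBUS instead of an OOM kill. A simplified, single-threaded sketch of the check-then-add pattern (blk_limit, blk_used and charge_blocks are made-up names, not the kernel API):

/* Simplified sketch of the charge/uncharge pattern above. */
#include <errno.h>
#include <stdio.h>

static long blk_limit = 1024;	/* 0 would mean "unlimited", like sbinfo->max_blocks */
static long blk_used;

static int charge_blocks(long pages)
{
	if (blk_limit && blk_used > blk_limit - pages)
		return -ENOSPC;		/* out of space on the mount, not out of memory */
	blk_used += pages;
	return 0;
}

static void uncharge_blocks(long pages)
{
	blk_used -= pages;
}

int main(void)
{
	if (charge_blocks(2048) == -ENOSPC)
		printf("over the size= limit: a fault here would raise SIGBUS\n");
	charge_blocks(512);
	uncharge_blocks(512);
	return 0;
}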
259 return vma->vm_ops == &shmem_vm_ops; in vma_is_shmem()
280 if (!(sb->s_flags & SB_KERNMOUNT)) { in shmem_reserve_inode()
281 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
282 if (sbinfo->max_inodes) { in shmem_reserve_inode()
283 if (!sbinfo->free_inodes) { in shmem_reserve_inode()
284 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
285 return -ENOSPC; in shmem_reserve_inode()
287 sbinfo->free_inodes--; in shmem_reserve_inode()
290 ino = sbinfo->next_ino++; in shmem_reserve_inode()
292 ino = sbinfo->next_ino++; in shmem_reserve_inode()
293 if (unlikely(!sbinfo->full_inums && in shmem_reserve_inode()
301 __func__, MINOR(sb->s_dev)); in shmem_reserve_inode()
302 sbinfo->next_ino = 1; in shmem_reserve_inode()
303 ino = sbinfo->next_ino++; in shmem_reserve_inode()
307 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
310 * __shmem_file_setup, one of our callers, is lock-free: it in shmem_reserve_inode()
313 * unknown contexts. As such, use a per-cpu batched allocator in shmem_reserve_inode()
314 * which doesn't require the per-sb stat_lock unless we are at in shmem_reserve_inode()
318 * shmem mounts are not exposed to userspace, so we don't need in shmem_reserve_inode()
322 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); in shmem_reserve_inode()
325 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
326 ino = sbinfo->next_ino; in shmem_reserve_inode()
327 sbinfo->next_ino += SHMEM_INO_BATCH; in shmem_reserve_inode()
328 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
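The comment above explains why kernel-internal shmem mounts hand out inode numbers from a per-cpu batch: the shared stat_lock is taken only when a batch is exhausted. A rough userspace analogue using per-thread batches refilled from one shared counter (INO_BATCH and the other names here are illustrative; this is not the kernel allocator):

/* Per-thread batched ID allocation, loosely modelled on the ino_batch scheme. */
#include <pthread.h>
#include <stdio.h>

#define INO_BATCH 128

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_ino = 2;		/* shared, like sbinfo->next_ino */

static __thread unsigned long batch_next;	/* per-thread, like the per-cpu batch */
static __thread unsigned long batch_left;

static unsigned long get_ino(void)
{
	if (!batch_left) {
		pthread_mutex_lock(&stat_lock);	/* only the refill takes the shared lock */
		batch_next = next_ino;
		next_ino += INO_BATCH;
		pthread_mutex_unlock(&stat_lock);
		batch_left = INO_BATCH;
	}
	batch_left--;
	return batch_next++;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("ino %lu\n", get_ino());
	return 0;
}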
343 if (sbinfo->max_inodes) { in shmem_free_inode()
344 spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
345 sbinfo->free_inodes++; in shmem_free_inode()
346 spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
351 * shmem_recalc_inode - recalculate the block usage of an inode
357 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
358 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
367 freed = info->alloced - info->swapped - inode->i_mapping->nrpages; in shmem_recalc_inode()
369 info->alloced -= freed; in shmem_recalc_inode()
370 inode->i_blocks -= freed * BLOCKS_PER_PAGE; in shmem_recalc_inode()
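As a worked example of the arithmetic above (numbers invented for illustration): if info->alloced is 100 pages while the mapping still holds 70 resident pages plus 20 swap entries, then freed = 100 - 20 - 70 = 10, so info->alloced drops by 10 and inode->i_blocks by 10 * BLOCKS_PER_PAGE.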
384 inode->i_mapping->nrpages += pages; in shmem_charge()
386 spin_lock_irqsave(&info->lock, flags); in shmem_charge()
387 info->alloced += pages; in shmem_charge()
388 inode->i_blocks += pages * BLOCKS_PER_PAGE; in shmem_charge()
390 spin_unlock_irqrestore(&info->lock, flags); in shmem_charge()
402 spin_lock_irqsave(&info->lock, flags); in shmem_uncharge()
403 info->alloced -= pages; in shmem_uncharge()
404 inode->i_blocks -= pages * BLOCKS_PER_PAGE; in shmem_uncharge()
406 spin_unlock_irqrestore(&info->lock, flags); in shmem_uncharge()
417 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
424 return -ENOENT; in shmem_replace_entry()
431 * that an entry was not already brought back from swap by a racing thread.
433 * Checking page is not enough: by the time a SwapCache page is locked, it
439 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
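The check above re-reads the page-cache slot and proceeds only if it still holds the expected swap entry, since a racing thread may already have brought the page back from swap. A tiny userspace illustration of that confirm step (a plain array stands in for the xarray; all names are made up):

/* "Is the cached slot still the swap entry we expected?" */
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 8
static void *slots[NSLOTS];	/* stands in for the mapping's page-cache slots */

static bool confirm_swap(unsigned long index, void *expected_swap_entry)
{
	return slots[index] == expected_swap_entry;
}

int main(void)
{
	int swap_cookie;

	slots[3] = &swap_cookie;				/* slot still holds the swap entry */
	printf("%d\n", confirm_swap(3, &swap_cookie));		/* 1: safe to proceed */
	slots[3] = NULL;					/* a racing thread replaced it */
	printf("%d\n", confirm_swap(3, &swap_cookie));		/* 0: must fail or retry */
	return 0;
}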
471 #define SHMEM_HUGE_DENY (-1)
472 #define SHMEM_HUGE_FORCE (-2)
475 /* ifdef here to avoid bloating shmem.o when not necessary */
494 return -EINVAL; in shmem_parse_huge()
529 unsigned long batch = sc ? sc->nr_to_scan : 128; in shmem_unused_huge_shrink()
532 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
535 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
536 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
540 inode = igrab(&info->vfs_inode); in shmem_unused_huge_shrink()
544 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
549 if (round_up(inode->i_size, PAGE_SIZE) == in shmem_unused_huge_shrink()
550 round_up(inode->i_size, HPAGE_PMD_SIZE)) { in shmem_unused_huge_shrink()
551 list_move(&info->shrinklist, &to_remove); in shmem_unused_huge_shrink()
555 list_move(&info->shrinklist, &list); in shmem_unused_huge_shrink()
557 sbinfo->shrinklist_len--; in shmem_unused_huge_shrink()
558 if (!--batch) in shmem_unused_huge_shrink()
561 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
565 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
566 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
574 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
579 page = find_get_page(inode->i_mapping, in shmem_unused_huge_shrink()
580 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); in shmem_unused_huge_shrink()
612 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
621 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
622 list_move(&info->shrinklist, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
623 sbinfo->shrinklist_len++; in shmem_unused_huge_shrink()
624 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
637 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
647 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
663 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && in is_huge_enabled()
677 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); in shmem_add_to_page_cache()
689 page->mapping = mapping; in shmem_add_to_page_cache()
690 page->index = index; in shmem_add_to_page_cache()
709 xas_set_err(&xas, -EEXIST); in shmem_add_to_page_cache()
723 mapping->nrpages += nr; in shmem_add_to_page_cache()
737 page->mapping = NULL; in shmem_add_to_page_cache()
747 struct address_space *mapping = page->mapping; in shmem_delete_from_page_cache()
752 xa_lock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
753 error = shmem_replace_entry(mapping, page->index, page, radswap); in shmem_delete_from_page_cache()
754 page->mapping = NULL; in shmem_delete_from_page_cache()
755 mapping->nrpages--; in shmem_delete_from_page_cache()
758 xa_unlock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
771 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
773 return -ENOENT; in shmem_free_swap()
780 * given offsets are swapped out.
783 * as long as the inode doesn't go away and racy results are not a problem.
788 XA_STATE(xas, &mapping->i_pages, start); in shmem_partial_swap_usage()
790 unsigned long swapped = 0; in shmem_partial_swap_usage() local
793 xas_for_each(&xas, page, end - 1) { in shmem_partial_swap_usage()
797 swapped++; in shmem_partial_swap_usage()
807 return swapped << PAGE_SHIFT; in shmem_partial_swap_usage()
812 * given vma is swapped out.
815 * as long as the inode doesn't go away and racy results are not a problem.
819 struct inode *inode = file_inode(vma->vm_file); in shmem_swap_usage()
821 struct address_space *mapping = inode->i_mapping; in shmem_swap_usage()
822 unsigned long swapped; in shmem_swap_usage() local
824 /* Be careful as we don't hold info->lock */ in shmem_swap_usage()
825 swapped = READ_ONCE(info->swapped); in shmem_swap_usage()
832 if (!swapped) in shmem_swap_usage()
835 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) in shmem_swap_usage()
836 return swapped << PAGE_SHIFT; in shmem_swap_usage()
840 linear_page_index(vma, vma->vm_start), in shmem_swap_usage()
841 linear_page_index(vma, vma->vm_end)); in shmem_swap_usage()
866 index = indices[pvec.nr - 1] + 1; in shmem_unlock_mapping()
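Several loops in this file scan the mapping in pagevec-sized batches and resume from just past the last index found, as in the line above. A self-contained sketch of that scan-and-resume pattern (a sorted array stands in for the page-cache xarray; everything here is illustrative):

/* Batched scan that resumes after the last index found. */
#include <stdio.h>

#define BATCH 4

/* Fill out[] with up to max indices >= start; return how many were found. */
static int lookup(const unsigned long *set, int nset, unsigned long start,
		  unsigned long *out, int max)
{
	int n = 0;

	for (int i = 0; i < nset && n < max; i++)
		if (set[i] >= start)
			out[n++] = set[i];
	return n;
}

int main(void)
{
	const unsigned long populated[] = { 1, 5, 6, 9, 30, 31, 100 };
	unsigned long indices[BATCH];
	unsigned long index = 0;
	int nr;

	while ((nr = lookup(populated, 7, index, indices, BATCH)) > 0) {
		for (int i = 0; i < nr; i++)
			printf("process index %lu\n", indices[i]);
		index = indices[nr - 1] + 1;	/* resume after the last one found */
	}
	return 0;
}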
875 * Check whether a hole-punch or truncation needs to split a huge page,
893 page->index >= start && page->index + HPAGE_PMD_NR <= end) in shmem_punch_compound()
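The huge page is left intact only when the punched or truncated range covers it completely; otherwise it has to be split. A standalone version of that containment test (HPAGE_NR stands in for HPAGE_PMD_NR; the function name is made up):

/* Does the range [start, end) cover the whole huge page starting at head_index? */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_NR 512	/* base pages per 2MB huge page with 4KB pages */

static bool covers_whole_huge_page(unsigned long head_index,
				   unsigned long start, unsigned long end)
{
	return head_index >= start && head_index + HPAGE_NR <= end;
}

int main(void)
{
	/* Huge page spans indices [512, 1024): the first punch covers it,
	 * the second starts inside it and would force a split. */
	printf("%d %d\n",
	       covers_whole_huge_page(512, 512, 1024),
	       covers_whole_huge_page(512, 600, 1024));
	return 0;
}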
907 struct address_space *mapping = inode->i_mapping; in shmem_undo_range()
909 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_undo_range()
911 unsigned int partial_start = lstart & (PAGE_SIZE - 1); in shmem_undo_range()
912 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1); in shmem_undo_range()
919 if (lend == -1) in shmem_undo_range()
920 end = -1; /* unsigned, so actually very big */ in shmem_undo_range()
926 min(end - index, (pgoff_t)PAGEVEC_SIZE), in shmem_undo_range()
966 shmem_getpage(inode, start - 1, &page, SGP_READ); in shmem_undo_range()
997 min(end - index, (pgoff_t)PAGEVEC_SIZE), in shmem_undo_range()
1000 /* If all gone or hole-punch or unfalloc, we're done */ in shmem_undo_range()
1001 if (index == start || end != -1) in shmem_undo_range()
1019 index--; in shmem_undo_range()
1032 index--; in shmem_undo_range()
1055 spin_lock_irq(&info->lock); in shmem_undo_range()
1056 info->swapped -= nr_swaps_freed; in shmem_undo_range()
1058 spin_unlock_irq(&info->lock); in shmem_undo_range()
1064 inode->i_ctime = inode->i_mtime = current_time(inode); in shmem_truncate_range()
1071 struct inode *inode = path->dentry->d_inode; in shmem_getattr()
1073 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb); in shmem_getattr()
1075 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { in shmem_getattr()
1076 spin_lock_irq(&info->lock); in shmem_getattr()
1078 spin_unlock_irq(&info->lock); in shmem_getattr()
1083 stat->blksize = HPAGE_PMD_SIZE; in shmem_getattr()
1092 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_setattr()
1099 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in shmem_setattr()
1100 loff_t oldsize = inode->i_size; in shmem_setattr()
1101 loff_t newsize = attr->ia_size; in shmem_setattr()
1104 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || in shmem_setattr()
1105 (newsize > oldsize && (info->seals & F_SEAL_GROW))) in shmem_setattr()
1106 return -EPERM; in shmem_setattr()
1109 error = shmem_reacct_size(SHMEM_I(inode)->flags, in shmem_setattr()
1114 inode->i_ctime = inode->i_mtime = current_time(inode); in shmem_setattr()
1119 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1121 if (info->alloced) in shmem_setattr()
1123 newsize, (loff_t)-1); in shmem_setattr()
1126 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1134 spin_lock(&sbinfo->shrinklist_lock); in shmem_setattr()
1137 * ->shrink_list in shmem_unused_huge_shrink() in shmem_setattr()
1139 if (list_empty_careful(&info->shrinklist)) { in shmem_setattr()
1140 list_add_tail(&info->shrinklist, in shmem_setattr()
1141 &sbinfo->shrinklist); in shmem_setattr()
1142 sbinfo->shrinklist_len++; in shmem_setattr()
1144 spin_unlock(&sbinfo->shrinklist_lock); in shmem_setattr()
1150 if (attr->ia_valid & ATTR_MODE) in shmem_setattr()
1151 error = posix_acl_chmod(inode, inode->i_mode); in shmem_setattr()
1158 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode()
1160 if (inode->i_mapping->a_ops == &shmem_aops) { in shmem_evict_inode()
1161 shmem_unacct_size(info->flags, inode->i_size); in shmem_evict_inode()
1162 inode->i_size = 0; in shmem_evict_inode()
1163 shmem_truncate_range(inode, 0, (loff_t)-1); in shmem_evict_inode()
1164 if (!list_empty(&info->shrinklist)) { in shmem_evict_inode()
1165 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1166 if (!list_empty(&info->shrinklist)) { in shmem_evict_inode()
1167 list_del_init(&info->shrinklist); in shmem_evict_inode()
1168 sbinfo->shrinklist_len--; in shmem_evict_inode()
1170 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1172 while (!list_empty(&info->swaplist)) { in shmem_evict_inode()
1174 wait_var_event(&info->stop_eviction, in shmem_evict_inode()
1175 !atomic_read(&info->stop_eviction)); in shmem_evict_inode()
1178 if (!atomic_read(&info->stop_eviction)) in shmem_evict_inode()
1179 list_del_init(&info->swaplist); in shmem_evict_inode()
1184 simple_xattrs_free(&info->xattrs); in shmem_evict_inode()
1185 WARN_ON(inode->i_blocks); in shmem_evict_inode()
1186 shmem_free_inode(inode->i_sb); in shmem_evict_inode()
1197 XA_STATE(xas, &mapping->i_pages, start); in shmem_find_swap_entries()
1236 * Move the swapped pages for an inode to page cache. Returns the count
1237 * of pages swapped in, or the error in case of failure.
1245 struct address_space *mapping = inode->i_mapping; in shmem_unuse_swap_entries()
1261 if (error == -ENOMEM) in shmem_unuse_swap_entries()
1274 struct address_space *mapping = inode->i_mapping; in shmem_unuse_inode()
1301 *fs_pages_to_unuse -= ret; in shmem_unuse_inode()
1308 start = indices[pvec.nr - 1]; in shmem_unuse_inode()
1330 if (!info->swapped) { in shmem_unuse()
1331 list_del_init(&info->swaplist); in shmem_unuse()
1336 * but before doing so, make sure shmem_evict_inode() will not in shmem_unuse()
1338 * (igrab() would protect from unlink, but not from unmount). in shmem_unuse()
1340 atomic_inc(&info->stop_eviction); in shmem_unuse()
1343 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, in shmem_unuse()
1349 if (!info->swapped) in shmem_unuse()
1350 list_del_init(&info->swaplist); in shmem_unuse()
1351 if (atomic_dec_and_test(&info->stop_eviction)) in shmem_unuse()
1352 wake_up_var(&info->stop_eviction); in shmem_unuse()
1374 mapping = page->mapping; in shmem_writepage()
1375 index = page->index; in shmem_writepage()
1376 inode = mapping->host; in shmem_writepage()
1378 if (info->flags & VM_LOCKED) in shmem_writepage()
1385 * shmem_writepage; but a stacking filesystem might use ->writepage of in shmem_writepage()
1387 * swap only in response to memory pressure, and not for the writeback in shmem_writepage()
1390 if (!wbc->for_reclaim) { in shmem_writepage()
1401 * not yet completed the fallocation, then (a) we want to keep track in shmem_writepage()
1402 * of this page in case we have to undo it, and (b) it may not be a in shmem_writepage()
1407 if (inode->i_private) { in shmem_writepage()
1409 spin_lock(&inode->i_lock); in shmem_writepage()
1410 shmem_falloc = inode->i_private; in shmem_writepage()
1412 !shmem_falloc->waitq && in shmem_writepage()
1413 index >= shmem_falloc->start && in shmem_writepage()
1414 index < shmem_falloc->next) in shmem_writepage()
1415 shmem_falloc->nr_unswapped++; in shmem_writepage()
1418 spin_unlock(&inode->i_lock); in shmem_writepage()
1432 * Add inode to shmem_unuse()'s list of swapped-out inodes, in shmem_writepage()
1433 * if it's not already there. Do it now before the page is in shmem_writepage()
1436 * we've incremented swapped, because shmem_unuse_inode() will in shmem_writepage()
1437 * prune a !swapped inode from the swaplist under this mutex. in shmem_writepage()
1440 if (list_empty(&info->swaplist)) in shmem_writepage()
1441 list_add(&info->swaplist, &shmem_swaplist); in shmem_writepage()
1446 spin_lock_irq(&info->lock); in shmem_writepage()
1448 info->swapped++; in shmem_writepage()
1449 spin_unlock_irq(&info->lock); in shmem_writepage()
1464 if (wbc->for_reclaim) in shmem_writepage()
1475 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
1486 if (sbinfo->mpol) { in shmem_get_sbmpol()
1487 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1488 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1490 spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1513 vma->vm_pgoff = index + info->vfs_inode.i_ino; in shmem_pseudo_vma_init()
1514 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); in shmem_pseudo_vma_init()
1520 mpol_cond_put(vma->vm_policy); in shmem_pseudo_vma_destroy()
1543 struct address_space *mapping = info->vfs_inode.i_mapping; in shmem_alloc_hugepage()
1548 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, in shmem_alloc_hugepage()
1583 int err = -ENOSPC; in shmem_alloc_and_acct_page()
1602 err = -ENOMEM; in shmem_alloc_and_acct_page()
1646 return -ENOMEM; in shmem_replace_page()
1662 xa_lock_irq(&swap_mapping->i_pages); in shmem_replace_page()
1669 xa_unlock_irq(&swap_mapping->i_pages); in shmem_replace_page()
1673 * Is this possible? I think not, now that our callers check in shmem_replace_page()
1694 * Caller has to make sure that *pagep contains a valid swapped page.
1703 struct address_space *mapping = inode->i_mapping; in shmem_swapin_page()
1705 struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm; in shmem_swapin_page()
1726 error = -ENOMEM; in shmem_swapin_page()
1735 error = -EEXIST; in shmem_swapin_page()
1739 error = -EIO; in shmem_swapin_page()
1762 spin_lock_irq(&info->lock); in shmem_swapin_page()
1763 info->swapped--; in shmem_swapin_page()
1765 spin_unlock_irq(&info->lock); in shmem_swapin_page()
1778 error = -EEXIST; in shmem_swapin_page()
1789 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1791 * If we allocate a new one we do not mark it dirty. That's up to the
1803 struct address_space *mapping = inode->i_mapping; in shmem_getpage_gfp()
1815 return -EFBIG; in shmem_getpage_gfp()
1821 return -EINVAL; in shmem_getpage_gfp()
1824 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1825 charge_mm = vma ? vma->vm_mm : current->mm; in shmem_getpage_gfp()
1831 if (error == -EEXIST) in shmem_getpage_gfp()
1839 hindex = page->index; in shmem_getpage_gfp()
1856 * Fast cache lookup did not find it: in shmem_getpage_gfp()
1866 if (mapping->a_ops != &shmem_aops) in shmem_getpage_gfp()
1872 switch (sbinfo->huge) { in shmem_getpage_gfp()
1906 if (error != -ENOSPC) in shmem_getpage_gfp()
1912 while (retry--) { in shmem_getpage_gfp()
1939 spin_lock_irq(&info->lock); in shmem_getpage_gfp()
1940 info->alloced += compound_nr(page); in shmem_getpage_gfp()
1941 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); in shmem_getpage_gfp()
1943 spin_unlock_irq(&info->lock); in shmem_getpage_gfp()
1948 hindex + HPAGE_PMD_NR - 1) { in shmem_getpage_gfp()
1953 spin_lock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
1956 * ->shrink_list in shmem_unused_huge_shrink() in shmem_getpage_gfp()
1958 if (list_empty_careful(&info->shrinklist)) { in shmem_getpage_gfp()
1959 list_add_tail(&info->shrinklist, in shmem_getpage_gfp()
1960 &sbinfo->shrinklist); in shmem_getpage_gfp()
1961 sbinfo->shrinklist_len++; in shmem_getpage_gfp()
1963 spin_unlock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
1973 * Let SGP_WRITE caller clear ends if write does not fill page; in shmem_getpage_gfp()
1993 spin_lock_irq(&info->lock); in shmem_getpage_gfp()
1995 spin_unlock_irq(&info->lock); in shmem_getpage_gfp()
1997 error = -EINVAL; in shmem_getpage_gfp()
2001 *pagep = page + index - hindex; in shmem_getpage_gfp()
2020 if (error == -ENOSPC && !once++) { in shmem_getpage_gfp()
2021 spin_lock_irq(&info->lock); in shmem_getpage_gfp()
2023 spin_unlock_irq(&info->lock); in shmem_getpage_gfp()
2026 if (error == -EEXIST) in shmem_getpage_gfp()
2033 * entry unconditionally - even if something else had already woken the
2039 list_del_init(&wait->entry); in synchronous_wake_function()
2045 struct vm_area_struct *vma = vmf->vma; in shmem_fault()
2046 struct inode *inode = file_inode(vma->vm_file); in shmem_fault()
2047 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in shmem_fault()
2054 * prevent the hole-punch from ever completing: which in turn in shmem_fault()
2061 * It does not matter if we sometimes reach this check just before the in shmem_fault()
2062 * hole-punch begins, so that one fault then races with the punch: in shmem_fault()
2069 if (unlikely(inode->i_private)) { in shmem_fault()
2072 spin_lock(&inode->i_lock); in shmem_fault()
2073 shmem_falloc = inode->i_private; in shmem_fault()
2075 shmem_falloc->waitq && in shmem_fault()
2076 vmf->pgoff >= shmem_falloc->start && in shmem_fault()
2077 vmf->pgoff < shmem_falloc->next) { in shmem_fault()
2087 shmem_falloc_waitq = shmem_falloc->waitq; in shmem_fault()
2090 spin_unlock(&inode->i_lock); in shmem_fault()
2095 * stack of the hole-punching task: shmem_falloc_waitq in shmem_fault()
2097 * finish_wait() does not dereference it in that case; in shmem_fault()
2100 spin_lock(&inode->i_lock); in shmem_fault()
2102 spin_unlock(&inode->i_lock); in shmem_fault()
2108 spin_unlock(&inode->i_lock); in shmem_fault()
2113 if ((vma->vm_flags & VM_NOHUGEPAGE) || in shmem_fault()
2114 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) in shmem_fault()
2116 else if (vma->vm_flags & VM_HUGEPAGE) in shmem_fault()
2119 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, in shmem_fault()
2139 return -ENOMEM; in shmem_get_unmapped_area()
2141 get_area = current->mm->get_unmapped_area; in shmem_get_unmapped_area()
2150 if (addr > TASK_SIZE - len) in shmem_get_unmapped_area()
2172 VM_BUG_ON(file->f_op != &shmem_file_operations); in shmem_get_unmapped_area()
2173 sb = file_inode(file)->i_sb; in shmem_get_unmapped_area()
2181 sb = shm_mnt->mnt_sb; in shmem_get_unmapped_area()
2183 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) in shmem_get_unmapped_area()
2187 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); in shmem_get_unmapped_area()
2190 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) in shmem_get_unmapped_area()
2193 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; in shmem_get_unmapped_area()
2205 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); in shmem_get_unmapped_area()
2206 inflated_addr += offset - inflated_offset; in shmem_get_unmapped_area()
2210 if (inflated_addr > TASK_SIZE - len) in shmem_get_unmapped_area()
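The arithmetic above nudges the candidate address forward until its offset within a 2MB window matches the file offset's, so the mapping can later be served with PMD-sized pages. A simplified sketch of just that alignment step (illustrative; the real function also re-runs get_unmapped_area() with an inflated length and checks TASK_SIZE):

/* Pick an address congruent to the file offset modulo the huge-page size. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_SIZE	(2UL << 20)	/* 2MB */

static unsigned long align_for_huge(unsigned long addr, unsigned long pgoff)
{
	unsigned long offset = (pgoff << PAGE_SHIFT) & (HPAGE_SIZE - 1);
	unsigned long addr_off = addr & (HPAGE_SIZE - 1);

	if (addr_off == offset)
		return addr;		/* already congruent mod 2MB */
	if (addr_off > offset)
		addr += HPAGE_SIZE;	/* step forward one huge page first */
	return addr + offset - addr_off;
}

int main(void)
{
	/* pgoff 0 wants a 2MB-aligned address: 0x7f0000201000 becomes 0x7f0000400000 */
	printf("%#lx\n", align_for_huge(0x7f0000201000UL, 0));
	return 0;
}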
2218 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy()
2219 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); in shmem_set_policy()
2225 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy()
2228 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in shmem_get_policy()
2229 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); in shmem_get_policy()
2237 int retval = -ENOMEM; in shmem_lock()
2240 * What serializes the accesses to info->flags? in shmem_lock()
2244 if (lock && !(info->flags & VM_LOCKED)) { in shmem_lock()
2245 if (!user_shm_lock(inode->i_size, user)) in shmem_lock()
2247 info->flags |= VM_LOCKED; in shmem_lock()
2248 mapping_set_unevictable(file->f_mapping); in shmem_lock()
2250 if (!lock && (info->flags & VM_LOCKED) && user) { in shmem_lock()
2251 user_shm_unlock(inode->i_size, user); in shmem_lock()
2252 info->flags &= ~VM_LOCKED; in shmem_lock()
2253 mapping_clear_unevictable(file->f_mapping); in shmem_lock()
2266 ret = seal_check_future_write(info->seals, vma); in shmem_mmap()
2270 /* arm64 - allow memory tagging on RAM-based files */ in shmem_mmap()
2271 vma->vm_flags |= VM_MTE_ALLOWED; in shmem_mmap()
2274 vma->vm_ops = &shmem_vm_ops; in shmem_mmap()
2276 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < in shmem_mmap()
2277 (vma->vm_end & HPAGE_PMD_MASK)) { in shmem_mmap()
2278 khugepaged_enter(vma, vma->vm_flags); in shmem_mmap()
2296 inode->i_ino = ino; in shmem_get_inode()
2298 inode->i_blocks = 0; in shmem_get_inode()
2299 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); in shmem_get_inode()
2300 inode->i_generation = prandom_u32(); in shmem_get_inode()
2302 memset(info, 0, (char *)inode - (char *)info); in shmem_get_inode()
2303 spin_lock_init(&info->lock); in shmem_get_inode()
2304 atomic_set(&info->stop_eviction, 0); in shmem_get_inode()
2305 info->seals = F_SEAL_SEAL; in shmem_get_inode()
2306 info->flags = flags & VM_NORESERVE; in shmem_get_inode()
2307 INIT_LIST_HEAD(&info->shrinklist); in shmem_get_inode()
2308 INIT_LIST_HEAD(&info->swaplist); in shmem_get_inode()
2309 simple_xattrs_init(&info->xattrs); in shmem_get_inode()
2314 inode->i_op = &shmem_special_inode_operations; in shmem_get_inode()
2318 inode->i_mapping->a_ops = &shmem_aops; in shmem_get_inode()
2319 inode->i_op = &shmem_inode_operations; in shmem_get_inode()
2320 inode->i_fop = &shmem_file_operations; in shmem_get_inode()
2321 mpol_shared_policy_init(&info->policy, in shmem_get_inode()
2327 inode->i_size = 2 * BOGO_DIRENT_SIZE; in shmem_get_inode()
2328 inode->i_op = &shmem_dir_inode_operations; in shmem_get_inode()
2329 inode->i_fop = &simple_dir_operations; in shmem_get_inode()
2333 * Must not load anything in the rbtree, in shmem_get_inode()
2334 * mpol_free_shared_policy will not be called. in shmem_get_inode()
2336 mpol_shared_policy_init(&info->policy, NULL); in shmem_get_inode()
2348 return mapping->a_ops == &shmem_aops; in shmem_mapping()
2359 struct inode *inode = file_inode(dst_vma->vm_file); in shmem_mfill_atomic_pte()
2361 struct address_space *mapping = inode->i_mapping; in shmem_mfill_atomic_pte()
2371 ret = -ENOMEM; in shmem_mfill_atomic_pte()
2374 * We may have got a page, returned -ENOENT triggering a retry, in shmem_mfill_atomic_pte()
2375 * and now we find ourselves with -ENOMEM. Release the page, to in shmem_mfill_atomic_pte()
2402 return -ENOENT; in shmem_mfill_atomic_pte()
2417 ret = -EFAULT; in shmem_mfill_atomic_pte()
2428 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); in shmem_mfill_atomic_pte()
2429 if (dst_vma->vm_flags & VM_WRITE) in shmem_mfill_atomic_pte()
2437 * only if VM_WRITE is not set is faster. in shmem_mfill_atomic_pte()
2444 ret = -EFAULT; in shmem_mfill_atomic_pte()
2449 ret = -EEXIST; in shmem_mfill_atomic_pte()
2455 spin_lock_irq(&info->lock); in shmem_mfill_atomic_pte()
2456 info->alloced++; in shmem_mfill_atomic_pte()
2457 inode->i_blocks += BLOCKS_PER_PAGE; in shmem_mfill_atomic_pte()
2459 spin_unlock_irq(&info->lock); in shmem_mfill_atomic_pte()
2465 /* No need to invalidate - it was non-present before */ in shmem_mfill_atomic_pte()
2521 struct inode *inode = mapping->host; in shmem_write_begin()
2526 if (unlikely(info->seals & (F_SEAL_GROW | in shmem_write_begin()
2528 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) in shmem_write_begin()
2529 return -EPERM; in shmem_write_begin()
2530 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
2531 return -EPERM; in shmem_write_begin()
2542 struct inode *inode = mapping->host; in shmem_write_end()
2544 if (pos + copied > inode->i_size) in shmem_write_end()
2560 unsigned from = pos & (PAGE_SIZE - 1); in shmem_write_end()
2575 struct file *file = iocb->ki_filp; in shmem_file_read_iter()
2577 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter()
2583 loff_t *ppos = &iocb->ki_pos; in shmem_file_read_iter()
2613 if (error == -EINVAL) in shmem_file_read_iter()
2638 nr -= offset; in shmem_file_read_iter()
2659 * Ok, we have the page, and it's up-to-date, so in shmem_file_read_iter()
2672 error = -EFAULT; in shmem_file_read_iter()
2735 struct address_space *mapping = file->f_mapping; in shmem_file_llseek()
2736 struct inode *inode = mapping->host; in shmem_file_llseek()
2746 if (offset < 0 || offset >= inode->i_size) in shmem_file_llseek()
2747 offset = -ENXIO; in shmem_file_llseek()
2750 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_file_llseek()
2754 if (new_offset < inode->i_size) in shmem_file_llseek()
2757 offset = -ENXIO; in shmem_file_llseek()
2759 offset = inode->i_size; in shmem_file_llseek()
2773 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate()
2780 return -EOPNOTSUPP; in shmem_fallocate()
2785 struct address_space *mapping = file->f_mapping; in shmem_fallocate()
2787 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; in shmem_fallocate()
2791 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { in shmem_fallocate()
2792 error = -EPERM; in shmem_fallocate()
2799 spin_lock(&inode->i_lock); in shmem_fallocate()
2800 inode->i_private = &shmem_falloc; in shmem_fallocate()
2801 spin_unlock(&inode->i_lock); in shmem_fallocate()
2805 1 + unmap_end - unmap_start, 0); in shmem_fallocate()
2806 shmem_truncate_range(inode, offset, offset + len - 1); in shmem_fallocate()
2807 /* No need to unmap again: hole-punching leaves COWed pages */ in shmem_fallocate()
2809 spin_lock(&inode->i_lock); in shmem_fallocate()
2810 inode->i_private = NULL; in shmem_fallocate()
2813 spin_unlock(&inode->i_lock); in shmem_fallocate()
2823 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { in shmem_fallocate()
2824 error = -EPERM; in shmem_fallocate()
2829 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_fallocate()
2831 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
2832 error = -ENOSPC; in shmem_fallocate()
2841 spin_lock(&inode->i_lock); in shmem_fallocate()
2842 inode->i_private = &shmem_falloc; in shmem_fallocate()
2843 spin_unlock(&inode->i_lock); in shmem_fallocate()
2853 error = -EINTR; in shmem_fallocate()
2855 error = -ENOMEM; in shmem_fallocate()
2863 ((loff_t)index << PAGE_SHIFT) - 1, true); in shmem_fallocate()
2889 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) in shmem_fallocate()
2891 inode->i_ctime = current_time(inode); in shmem_fallocate()
2893 spin_lock(&inode->i_lock); in shmem_fallocate()
2894 inode->i_private = NULL; in shmem_fallocate()
2895 spin_unlock(&inode->i_lock); in shmem_fallocate()
2903 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs()
2905 buf->f_type = TMPFS_MAGIC; in shmem_statfs()
2906 buf->f_bsize = PAGE_SIZE; in shmem_statfs()
2907 buf->f_namelen = NAME_MAX; in shmem_statfs()
2908 if (sbinfo->max_blocks) { in shmem_statfs()
2909 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
2910 buf->f_bavail = in shmem_statfs()
2911 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
2912 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
2914 if (sbinfo->max_inodes) { in shmem_statfs()
2915 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
2916 buf->f_ffree = sbinfo->free_inodes; in shmem_statfs()
2929 int error = -ENOSPC; in shmem_mknod()
2931 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); in shmem_mknod()
2937 &dentry->d_name, in shmem_mknod()
2939 if (error && error != -EOPNOTSUPP) in shmem_mknod()
2943 dir->i_size += BOGO_DIRENT_SIZE; in shmem_mknod()
2944 dir->i_ctime = dir->i_mtime = current_time(dir); in shmem_mknod()
2946 dget(dentry); /* Extra count - pin the dentry in core */ in shmem_mknod()
2958 int error = -ENOSPC; in shmem_tmpfile()
2960 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); in shmem_tmpfile()
2965 if (error && error != -EOPNOTSUPP) in shmem_tmpfile()
3009 if (inode->i_nlink) { in shmem_link()
3010 ret = shmem_reserve_inode(inode->i_sb, NULL); in shmem_link()
3015 dir->i_size += BOGO_DIRENT_SIZE; in shmem_link()
3016 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); in shmem_link()
3029 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) in shmem_unlink()
3030 shmem_free_inode(inode->i_sb); in shmem_unlink()
3032 dir->i_size -= BOGO_DIRENT_SIZE; in shmem_unlink()
3033 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); in shmem_unlink()
3035 dput(dentry); /* Undo the count from "create" - this does all the work */ in shmem_unlink()
3042 return -ENOTEMPTY; in shmem_rmdir()
3063 old_dir->i_ctime = old_dir->i_mtime = in shmem_exchange()
3064 new_dir->i_ctime = new_dir->i_mtime = in shmem_exchange()
3065 d_inode(old_dentry)->i_ctime = in shmem_exchange()
3066 d_inode(new_dentry)->i_ctime = current_time(old_dir); in shmem_exchange()
3076 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); in shmem_whiteout()
3078 return -ENOMEM; in shmem_whiteout()
3091 * not sure which one, but that isn't even important. in shmem_whiteout()
3106 int they_are_dirs = S_ISDIR(inode->i_mode); in shmem_rename2()
3109 return -EINVAL; in shmem_rename2()
3115 return -ENOTEMPTY; in shmem_rename2()
3136 old_dir->i_size -= BOGO_DIRENT_SIZE; in shmem_rename2()
3137 new_dir->i_size += BOGO_DIRENT_SIZE; in shmem_rename2()
3138 old_dir->i_ctime = old_dir->i_mtime = in shmem_rename2()
3139 new_dir->i_ctime = new_dir->i_mtime = in shmem_rename2()
3140 inode->i_ctime = current_time(old_dir); in shmem_rename2()
3153 return -ENAMETOOLONG; in shmem_symlink()
3155 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, in shmem_symlink()
3158 return -ENOSPC; in shmem_symlink()
3160 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_symlink()
3162 if (error && error != -EOPNOTSUPP) { in shmem_symlink()
3167 inode->i_size = len-1; in shmem_symlink()
3169 inode->i_link = kmemdup(symname, len, GFP_KERNEL); in shmem_symlink()
3170 if (!inode->i_link) { in shmem_symlink()
3172 return -ENOMEM; in shmem_symlink()
3174 inode->i_op = &shmem_short_symlink_operations; in shmem_symlink()
3182 inode->i_mapping->a_ops = &shmem_aops; in shmem_symlink()
3183 inode->i_op = &shmem_symlink_inode_operations; in shmem_symlink()
3190 dir->i_size += BOGO_DIRENT_SIZE; in shmem_symlink()
3191 dir->i_ctime = dir->i_mtime = current_time(dir); in shmem_symlink()
3210 page = find_get_page(inode->i_mapping, 0); in shmem_get_link()
3212 return ERR_PTR(-ECHILD); in shmem_get_link()
3215 return ERR_PTR(-ECHILD); in shmem_get_link()
3247 for (xattr = xattr_array; xattr->name != NULL; xattr++) { in shmem_initxattrs()
3248 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); in shmem_initxattrs()
3250 return -ENOMEM; in shmem_initxattrs()
3252 len = strlen(xattr->name) + 1; in shmem_initxattrs()
3253 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, in shmem_initxattrs()
3255 if (!new_xattr->name) { in shmem_initxattrs()
3257 return -ENOMEM; in shmem_initxattrs()
3260 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, in shmem_initxattrs()
3262 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, in shmem_initxattrs()
3263 xattr->name, len); in shmem_initxattrs()
3265 simple_xattr_list_add(&info->xattrs, new_xattr); in shmem_initxattrs()
3278 return simple_xattr_get(&info->xattrs, name, buffer, size); in shmem_xattr_handler_get()
3289 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); in shmem_xattr_handler_set()
3317 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); in shmem_listxattr()
3337 return ERR_PTR(-ESTALE); in shmem_get_parent()
3345 return ino->i_ino == inum && fh[0] == ino->i_generation; in shmem_match()
3367 inum = fid->raw[2]; in shmem_fh_to_dentry()
3368 inum = (inum << 32) | fid->raw[1]; in shmem_fh_to_dentry()
3370 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), in shmem_fh_to_dentry()
3371 shmem_match, fid->raw); in shmem_fh_to_dentry()
3389 /* Unfortunately insert_inode_hash is not idempotent, in shmem_encode_fh()
3398 inode->i_ino + inode->i_generation); in shmem_encode_fh()
3402 fh[0] = inode->i_generation; in shmem_encode_fh()
3403 fh[1] = inode->i_ino; in shmem_encode_fh()
3404 fh[2] = ((__u64)inode->i_ino) >> 32; in shmem_encode_fh()
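The export code packs the generation and the 64-bit inode number into a three-word file handle, and shmem_fh_to_dentry() above reassembles the number as (raw[2] << 32) | raw[1]. A standalone round trip of that layout:

/* Encode/decode the 3-word file handle layout shown above. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ino = 0x123456789abcULL;
	uint32_t generation = 42;
	uint32_t fh[3];

	fh[0] = generation;
	fh[1] = (uint32_t)ino;		/* low 32 bits of the inode number */
	fh[2] = (uint32_t)(ino >> 32);	/* high 32 bits */

	uint64_t inum = fh[2];
	inum = (inum << 32) | fh[1];	/* reassembled as in shmem_fh_to_dentry() */

	printf("ino back: %#" PRIx64 ", gen: %u\n", inum, fh[0]);
	return 0;
}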
3453 struct shmem_options *ctx = fc->fs_private; in shmem_parse_one()
3467 size = memparse(param->string, &rest); in shmem_parse_one()
3476 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); in shmem_parse_one()
3477 ctx->seen |= SHMEM_SEEN_BLOCKS; in shmem_parse_one()
3480 ctx->blocks = memparse(param->string, &rest); in shmem_parse_one()
3483 ctx->seen |= SHMEM_SEEN_BLOCKS; in shmem_parse_one()
3486 ctx->inodes = memparse(param->string, &rest); in shmem_parse_one()
3489 ctx->seen |= SHMEM_SEEN_INODES; in shmem_parse_one()
3492 ctx->mode = result.uint_32 & 07777; in shmem_parse_one()
3503 if (!kuid_has_mapping(fc->user_ns, kuid)) in shmem_parse_one()
3506 ctx->uid = kuid; in shmem_parse_one()
3517 if (!kgid_has_mapping(fc->user_ns, kgid)) in shmem_parse_one()
3520 ctx->gid = kgid; in shmem_parse_one()
3523 ctx->huge = result.uint_32; in shmem_parse_one()
3524 if (ctx->huge != SHMEM_HUGE_NEVER && in shmem_parse_one()
3528 ctx->seen |= SHMEM_SEEN_HUGE; in shmem_parse_one()
3532 mpol_put(ctx->mpol); in shmem_parse_one()
3533 ctx->mpol = NULL; in shmem_parse_one()
3534 if (mpol_parse_str(param->string, &ctx->mpol)) in shmem_parse_one()
3540 ctx->full_inums = false; in shmem_parse_one()
3541 ctx->seen |= SHMEM_SEEN_INUMS; in shmem_parse_one()
3548 ctx->full_inums = true; in shmem_parse_one()
3549 ctx->seen |= SHMEM_SEEN_INUMS; in shmem_parse_one()
3555 return invalfc(fc, "Unsupported parameter '%s'", param->key); in shmem_parse_one()
3557 return invalfc(fc, "Bad value for '%s'", param->key); in shmem_parse_one()
3565 int err = security_sb_eat_lsm_opts(options, &fc->security); in shmem_parse_options()
3574 * NUL-terminate this option: unfortunately, in shmem_parse_options()
3575 * mount options form a comma-separated list, in shmem_parse_options()
3583 options[-1] = '\0'; in shmem_parse_options()
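Mount options arrive as one comma-separated string, and each option is NUL-terminated in place before being handed to the parser; the kernel's loop additionally has to cope with commas that belong to an mpol nodelist, which is why it scans by hand. A simplified userspace version of the in-place splitting only:

/* In-place splitting of a comma-separated option list (simplified). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char opts[] = "size=1G,nr_inodes=10k,mode=700";
	char *this_opt = opts;

	while (this_opt && *this_opt) {
		char *comma = strchr(this_opt, ',');

		if (comma)
			*comma = '\0';	/* NUL-terminate this option in place */
		printf("option: %s\n", this_opt);
		this_opt = comma ? comma + 1 : NULL;
	}
	return 0;
}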
3607 * Note that we disallow change from limited->unlimited blocks/inodes while any
3608 * are in use; but we must separately disallow unlimited->limited, because in
3613 struct shmem_options *ctx = fc->fs_private; in shmem_reconfigure()
3614 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); in shmem_reconfigure()
3618 spin_lock(&sbinfo->stat_lock); in shmem_reconfigure()
3619 inodes = sbinfo->max_inodes - sbinfo->free_inodes; in shmem_reconfigure()
3620 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { in shmem_reconfigure()
3621 if (!sbinfo->max_blocks) { in shmem_reconfigure()
3625 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_reconfigure()
3626 ctx->blocks) > 0) { in shmem_reconfigure()
3631 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { in shmem_reconfigure()
3632 if (!sbinfo->max_inodes) { in shmem_reconfigure()
3636 if (ctx->inodes < inodes) { in shmem_reconfigure()
3642 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && in shmem_reconfigure()
3643 sbinfo->next_ino > UINT_MAX) { in shmem_reconfigure()
3644 err = "Current inum too high to switch to 32-bit inums"; in shmem_reconfigure()
3648 if (ctx->seen & SHMEM_SEEN_HUGE) in shmem_reconfigure()
3649 sbinfo->huge = ctx->huge; in shmem_reconfigure()
3650 if (ctx->seen & SHMEM_SEEN_INUMS) in shmem_reconfigure()
3651 sbinfo->full_inums = ctx->full_inums; in shmem_reconfigure()
3652 if (ctx->seen & SHMEM_SEEN_BLOCKS) in shmem_reconfigure()
3653 sbinfo->max_blocks = ctx->blocks; in shmem_reconfigure()
3654 if (ctx->seen & SHMEM_SEEN_INODES) { in shmem_reconfigure()
3655 sbinfo->max_inodes = ctx->inodes; in shmem_reconfigure()
3656 sbinfo->free_inodes = ctx->inodes - inodes; in shmem_reconfigure()
3662 if (ctx->mpol) { in shmem_reconfigure()
3663 mpol_put(sbinfo->mpol); in shmem_reconfigure()
3664 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ in shmem_reconfigure()
3665 ctx->mpol = NULL; in shmem_reconfigure()
3667 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
3670 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
3676 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options()
3678 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
3680 sbinfo->max_blocks << (PAGE_SHIFT - 10)); in shmem_show_options()
3681 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
3682 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
3683 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
3684 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
3685 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
3687 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
3688 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
3690 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
3695 * /proc/config.gz to confirm 64-bit inums were successfully applied in shmem_show_options()
3696 * (which may not even exist if IKCONFIG_PROC isn't enabled). in shmem_show_options()
3698 * We hide it when inode64 isn't the default and we are using 32-bit in shmem_show_options()
3704 *                    +-----------------+-----------------+ in shmem_show_options()
3705 *                    | TMPFS_INODE64=y | TMPFS_INODE64=n | in shmem_show_options()
3706 * +------------------+-----------------+-----------------+ in shmem_show_options()
3707 * | full_inums=true  | show            | show            | in shmem_show_options()
3708 * | full_inums=false | show            | hide            | in shmem_show_options()
3709 * +------------------+-----------------+-----------------+ in shmem_show_options()
3712 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) in shmem_show_options()
3713 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); in shmem_show_options()
3716 if (sbinfo->huge) in shmem_show_options()
3717 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
3719 shmem_show_mpol(seq, sbinfo->mpol); in shmem_show_options()
3729 free_percpu(sbinfo->ino_batch); in shmem_put_super()
3730 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
3731 mpol_put(sbinfo->mpol); in shmem_put_super()
3733 sb->s_fs_info = NULL; in shmem_put_super()
3738 struct shmem_options *ctx = fc->fs_private; in shmem_fill_super()
3741 int err = -ENOMEM; in shmem_fill_super()
3747 return -ENOMEM; in shmem_fill_super()
3749 sb->s_fs_info = sbinfo; in shmem_fill_super()
3757 if (!(sb->s_flags & SB_KERNMOUNT)) { in shmem_fill_super()
3758 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) in shmem_fill_super()
3759 ctx->blocks = shmem_default_max_blocks(); in shmem_fill_super()
3760 if (!(ctx->seen & SHMEM_SEEN_INODES)) in shmem_fill_super()
3761 ctx->inodes = shmem_default_max_inodes(); in shmem_fill_super()
3762 if (!(ctx->seen & SHMEM_SEEN_INUMS)) in shmem_fill_super()
3763 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); in shmem_fill_super()
3765 sb->s_flags |= SB_NOUSER; in shmem_fill_super()
3767 sb->s_export_op = &shmem_export_ops; in shmem_fill_super()
3768 sb->s_flags |= SB_NOSEC; in shmem_fill_super()
3770 sb->s_flags |= SB_NOUSER; in shmem_fill_super()
3772 sbinfo->max_blocks = ctx->blocks; in shmem_fill_super()
3773 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; in shmem_fill_super()
3774 if (sb->s_flags & SB_KERNMOUNT) { in shmem_fill_super()
3775 sbinfo->ino_batch = alloc_percpu(ino_t); in shmem_fill_super()
3776 if (!sbinfo->ino_batch) in shmem_fill_super()
3779 sbinfo->uid = ctx->uid; in shmem_fill_super()
3780 sbinfo->gid = ctx->gid; in shmem_fill_super()
3781 sbinfo->full_inums = ctx->full_inums; in shmem_fill_super()
3782 sbinfo->mode = ctx->mode; in shmem_fill_super()
3783 sbinfo->huge = ctx->huge; in shmem_fill_super()
3784 sbinfo->mpol = ctx->mpol; in shmem_fill_super()
3785 ctx->mpol = NULL; in shmem_fill_super()
3787 spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
3788 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
3790 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
3791 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
3793 sb->s_maxbytes = MAX_LFS_FILESIZE; in shmem_fill_super()
3794 sb->s_blocksize = PAGE_SIZE; in shmem_fill_super()
3795 sb->s_blocksize_bits = PAGE_SHIFT; in shmem_fill_super()
3796 sb->s_magic = TMPFS_MAGIC; in shmem_fill_super()
3797 sb->s_op = &shmem_ops; in shmem_fill_super()
3798 sb->s_time_gran = 1; in shmem_fill_super()
3800 sb->s_xattr = shmem_xattr_handlers; in shmem_fill_super()
3803 sb->s_flags |= SB_POSIXACL; in shmem_fill_super()
3805 uuid_gen(&sb->s_uuid); in shmem_fill_super()
3807 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3810 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3811 inode->i_gid = sbinfo->gid; in shmem_fill_super()
3812 sb->s_root = d_make_root(inode); in shmem_fill_super()
3813 if (!sb->s_root) in shmem_fill_super()
3829 struct shmem_options *ctx = fc->fs_private; in shmem_free_fc()
3832 mpol_put(ctx->mpol); in shmem_free_fc()
3855 return &info->vfs_inode; in shmem_alloc_inode()
3860 if (S_ISLNK(inode->i_mode)) in shmem_free_in_core_inode()
3861 kfree(inode->i_link); in shmem_free_in_core_inode()
3867 if (S_ISREG(inode->i_mode)) in shmem_destroy_inode()
3868 mpol_free_shared_policy(&SHMEM_I(inode)->policy); in shmem_destroy_inode()
3874 inode_init_once(&info->vfs_inode); in shmem_init_inode()
3989 return -ENOMEM; in shmem_init_fs_context()
3991 ctx->mode = 0777 | S_ISVTX; in shmem_init_fs_context()
3992 ctx->uid = current_fsuid(); in shmem_init_fs_context()
3993 ctx->gid = current_fsgid(); in shmem_init_fs_context()
3995 fc->fs_private = ctx; in shmem_init_fs_context()
3996 fc->ops = &shmem_fs_context_ops; in shmem_init_fs_context()
4019 pr_err("Could not register tmpfs\n"); in shmem_init()
4026 pr_err("Could not kern_mount tmpfs\n"); in shmem_init()
4032 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; in shmem_init()
4066 buf[count - 1] = '\n'; in shmem_enabled_show()
4077 return -EINVAL; in shmem_enabled_store()
4080 if (count && tmp[count - 1] == '\n') in shmem_enabled_store()
4081 tmp[count - 1] = '\0'; in shmem_enabled_store()
4084 if (huge == -EINVAL) in shmem_enabled_store()
4085 return -EINVAL; in shmem_enabled_store()
4088 return -EINVAL; in shmem_enabled_store()
4092 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; in shmem_enabled_store()
4103 struct inode *inode = file_inode(vma->vm_file); in shmem_huge_enabled()
4104 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_huge_enabled()
4108 if (!transhuge_vma_enabled(vma, vma->vm_flags)) in shmem_huge_enabled()
4114 switch (sbinfo->huge) { in shmem_huge_enabled()
4120 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); in shmem_huge_enabled()
4128 return (vma->vm_flags & VM_HUGEPAGE); in shmem_huge_enabled()
4139 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4142 * shmem code (swap-backed and resource-limited) are outweighed by
4185 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); in shmem_get_unmapped_area()
4191 truncate_inode_pages_range(inode->i_mapping, lstart, lend); in shmem_truncate_range()
4215 return ERR_PTR(-EINVAL); in __shmem_file_setup()
4218 return ERR_PTR(-ENOMEM); in __shmem_file_setup()
4220 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, in __shmem_file_setup()
4224 return ERR_PTR(-ENOSPC); in __shmem_file_setup()
4226 inode->i_flags |= i_flags; in __shmem_file_setup()
4227 inode->i_size = size; in __shmem_file_setup()
4239 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4246 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4254 * shmem_file_setup - get an unlinked file living in tmpfs
4257 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4266 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4270 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4280 * shmem_zero_setup - setup a shared anonymous mapping
4286 loff_t size = vma->vm_end - vma->vm_start; in shmem_zero_setup()
4294 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); in shmem_zero_setup()
4298 if (vma->vm_file) in shmem_zero_setup()
4299 fput(vma->vm_file); in shmem_zero_setup()
4300 vma->vm_file = file; in shmem_zero_setup()
4301 vma->vm_ops = &shmem_vm_ops; in shmem_zero_setup()
4304 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < in shmem_zero_setup()
4305 (vma->vm_end & HPAGE_PMD_MASK)) { in shmem_zero_setup()
4306 khugepaged_enter(vma, vma->vm_flags); in shmem_zero_setup()
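shmem_zero_setup() is what backs a shared anonymous mapping with an internal "dev/zero" shmem file, so from userspace the whole mechanism is reached simply by mmap with MAP_SHARED | MAP_ANONYMOUS:

/* Userspace view of a mapping backed by shmem_zero_setup(). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xab, len);	/* pages are provided by the shmem object */
	printf("first byte: %#x\n", (unsigned char)p[0]);
	munmap(p, len);
	return 0;
}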
4313 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4320 * But read_cache_page_gfp() uses the ->readpage() method: which does not
4331 struct inode *inode = mapping->host; in shmem_read_mapping_page_gfp()
4335 BUG_ON(mapping->a_ops != &shmem_aops); in shmem_read_mapping_page_gfp()