/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

#include "internal.h"

#undef CREATE_TRACE_POINTS
#include <trace/hooks/shmem_fs.h>
#include <trace/hooks/mm.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

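/*
 * For illustration (assuming 4K pages): VM_ACCT() converts a byte size
 * into the number of whole pages charged against overcommit, rounding
 * up, so VM_ACCT(1) == 1, VM_ACCT(4096) == 1, VM_ACCT(4097) == 2.
 * BLOCKS_PER_PAGE likewise converts a page count into the 512-byte
 * units used by i_blocks.
 */
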
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

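/*
 * Usage sketch (illustrative, mirroring shmem_undo_range() below): with
 * SGP_READ the page comes back locked, and may be NULL over a hole, so
 * a caller must check it and drop both the lock and its reference:
 *
 *	struct page *page = NULL;
 *
 *	shmem_getpage(inode, index, &page, SGP_READ);
 *	if (page) {
 *		... read or zero part of the page ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */
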
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

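/*
 * Illustrative pairing (as in shmem_charge()/shmem_uncharge() below):
 * every successful shmem_inode_acct_block() must eventually be balanced
 * by shmem_inode_unacct_blocks() for the same page count:
 *
 *	if (!shmem_inode_acct_block(inode, pages))
 *		return false;
 *	...
 *	shmem_inode_unacct_blocks(inode, pages);
 */
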
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;
		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

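/*
 * Worked example of the batched path above: with SHMEM_INO_BATCH == 1024,
 * a CPU whose per-cpu cursor hits a multiple of 1024 takes stat_lock once
 * to claim [next_ino, next_ino + 1024), then hands out the remaining 1023
 * numbers from its own cursor without touching the lock again.
 */
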
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
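 *
 * For instance, with info->alloced == 10, info->swapped == 2 and
 * inode->i_mapping->nrpages == 6, freed == 10 - 2 - 6 == 2: two clean
 * hole pages were reclaimed behind our back and must be unaccounted.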
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

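/*
 * For illustration, the per-mount policy above is chosen with the huge=
 * mount option, e.g.:
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt/tmp
 */
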
/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

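/*
 * Example (shell, illustrative): force huge pages on all mounts for
 * testing, then restore the default:
 *
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	echo never > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */
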
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto move_back;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = compound_nr(page);
	int error;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (error) {
			if (PageTransHuge(page)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	cgroup_throttle_swaprate(page, gfp);

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__inc_node_page_state(page, NR_SHMEM_THPS);
		}
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	page->mapping = NULL;
	page_ref_sub(page, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

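/*
 * For instance, with 4K pages a range holding three swapped-out pages
 * reports 3 << PAGE_SHIFT == 12288 bytes.
 */
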
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
855*4882a593Smuzhiyun {
856*4882a593Smuzhiyun 	struct pagevec pvec;
857*4882a593Smuzhiyun 	pgoff_t indices[PAGEVEC_SIZE];
858*4882a593Smuzhiyun 	pgoff_t index = 0;
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	pagevec_init(&pvec);
861*4882a593Smuzhiyun 	/*
862*4882a593Smuzhiyun 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
863*4882a593Smuzhiyun 	 */
864*4882a593Smuzhiyun 	while (!mapping_unevictable(mapping)) {
865*4882a593Smuzhiyun 		/*
866*4882a593Smuzhiyun 		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
867*4882a593Smuzhiyun 		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
868*4882a593Smuzhiyun 		 */
869*4882a593Smuzhiyun 		pvec.nr = find_get_entries(mapping, index,
870*4882a593Smuzhiyun 					   PAGEVEC_SIZE, pvec.pages, indices);
871*4882a593Smuzhiyun 		if (!pvec.nr)
872*4882a593Smuzhiyun 			break;
873*4882a593Smuzhiyun 		index = indices[pvec.nr - 1] + 1;
874*4882a593Smuzhiyun 		pagevec_remove_exceptionals(&pvec);
875*4882a593Smuzhiyun 		check_move_unevictable_pages(&pvec);
876*4882a593Smuzhiyun 		pagevec_release(&pvec);
877*4882a593Smuzhiyun 		cond_resched();
878*4882a593Smuzhiyun 	}
879*4882a593Smuzhiyun }
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun /*
882*4882a593Smuzhiyun  * Check whether a hole-punch or truncation needs to split a huge page,
883*4882a593Smuzhiyun  * returning true if no split was required, or the split has been successful.
884*4882a593Smuzhiyun  *
885*4882a593Smuzhiyun  * Eviction (or truncation to 0 size) should never need to split a huge page;
886*4882a593Smuzhiyun  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
887*4882a593Smuzhiyun  * head, and then succeeded to trylock on tail.
888*4882a593Smuzhiyun  *
889*4882a593Smuzhiyun  * A split can only succeed when there are no additional references on the
890*4882a593Smuzhiyun  * huge page: so the split below relies upon find_get_entries() having stopped
891*4882a593Smuzhiyun  * when it found a subpage of the huge page, without getting further references.
892*4882a593Smuzhiyun  */
shmem_punch_compound(struct page * page,pgoff_t start,pgoff_t end)893*4882a593Smuzhiyun static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
894*4882a593Smuzhiyun {
895*4882a593Smuzhiyun 	if (!PageTransCompound(page))
896*4882a593Smuzhiyun 		return true;
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	/* Just proceed to delete a huge page wholly within the range punched */
899*4882a593Smuzhiyun 	if (PageHead(page) &&
900*4882a593Smuzhiyun 	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
901*4882a593Smuzhiyun 		return true;
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	/* Try to split huge page, so we can truly punch the hole or truncate */
904*4882a593Smuzhiyun 	return split_huge_page(page) >= 0;
905*4882a593Smuzhiyun }
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun /*
908*4882a593Smuzhiyun  * Remove range of pages and swap entries from page cache, and free them.
909*4882a593Smuzhiyun  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
910*4882a593Smuzhiyun  */
shmem_undo_range(struct inode * inode,loff_t lstart,loff_t lend,bool unfalloc)911*4882a593Smuzhiyun static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
912*4882a593Smuzhiyun 								 bool unfalloc)
913*4882a593Smuzhiyun {
914*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
915*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
916*4882a593Smuzhiyun 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
917*4882a593Smuzhiyun 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
918*4882a593Smuzhiyun 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
919*4882a593Smuzhiyun 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
920*4882a593Smuzhiyun 	struct pagevec pvec;
921*4882a593Smuzhiyun 	pgoff_t indices[PAGEVEC_SIZE];
922*4882a593Smuzhiyun 	long nr_swaps_freed = 0;
923*4882a593Smuzhiyun 	pgoff_t index;
924*4882a593Smuzhiyun 	int i;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	if (lend == -1)
927*4882a593Smuzhiyun 		end = -1;	/* unsigned, so actually very big */
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	pagevec_init(&pvec);
930*4882a593Smuzhiyun 	index = start;
931*4882a593Smuzhiyun 	while (index < end) {
932*4882a593Smuzhiyun 		pvec.nr = find_get_entries(mapping, index,
933*4882a593Smuzhiyun 			min(end - index, (pgoff_t)PAGEVEC_SIZE),
934*4882a593Smuzhiyun 			pvec.pages, indices);
935*4882a593Smuzhiyun 		if (!pvec.nr)
936*4882a593Smuzhiyun 			break;
937*4882a593Smuzhiyun 		for (i = 0; i < pagevec_count(&pvec); i++) {
938*4882a593Smuzhiyun 			struct page *page = pvec.pages[i];
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun 			index = indices[i];
941*4882a593Smuzhiyun 			if (index >= end)
942*4882a593Smuzhiyun 				break;
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 			if (xa_is_value(page)) {
945*4882a593Smuzhiyun 				if (unfalloc)
946*4882a593Smuzhiyun 					continue;
947*4882a593Smuzhiyun 				nr_swaps_freed += !shmem_free_swap(mapping,
948*4882a593Smuzhiyun 								index, page);
949*4882a593Smuzhiyun 				continue;
950*4882a593Smuzhiyun 			}
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 			if (!trylock_page(page))
955*4882a593Smuzhiyun 				continue;
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 			if ((!unfalloc || !PageUptodate(page)) &&
958*4882a593Smuzhiyun 			    page_mapping(page) == mapping) {
959*4882a593Smuzhiyun 				VM_BUG_ON_PAGE(PageWriteback(page), page);
960*4882a593Smuzhiyun 				if (shmem_punch_compound(page, start, end))
961*4882a593Smuzhiyun 					truncate_inode_page(mapping, page);
962*4882a593Smuzhiyun 			}
963*4882a593Smuzhiyun 			unlock_page(page);
964*4882a593Smuzhiyun 		}
965*4882a593Smuzhiyun 		pagevec_remove_exceptionals(&pvec);
966*4882a593Smuzhiyun 		pagevec_release(&pvec);
967*4882a593Smuzhiyun 		cond_resched();
968*4882a593Smuzhiyun 		index++;
969*4882a593Smuzhiyun 	}
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	if (partial_start) {
972*4882a593Smuzhiyun 		struct page *page = NULL;
973*4882a593Smuzhiyun 		shmem_getpage(inode, start - 1, &page, SGP_READ);
974*4882a593Smuzhiyun 		if (page) {
975*4882a593Smuzhiyun 			unsigned int top = PAGE_SIZE;
976*4882a593Smuzhiyun 			if (start > end) {
977*4882a593Smuzhiyun 				top = partial_end;
978*4882a593Smuzhiyun 				partial_end = 0;
979*4882a593Smuzhiyun 			}
980*4882a593Smuzhiyun 			zero_user_segment(page, partial_start, top);
981*4882a593Smuzhiyun 			set_page_dirty(page);
982*4882a593Smuzhiyun 			unlock_page(page);
983*4882a593Smuzhiyun 			put_page(page);
984*4882a593Smuzhiyun 		}
985*4882a593Smuzhiyun 	}
986*4882a593Smuzhiyun 	if (partial_end) {
987*4882a593Smuzhiyun 		struct page *page = NULL;
988*4882a593Smuzhiyun 		shmem_getpage(inode, end, &page, SGP_READ);
989*4882a593Smuzhiyun 		if (page) {
990*4882a593Smuzhiyun 			zero_user_segment(page, 0, partial_end);
991*4882a593Smuzhiyun 			set_page_dirty(page);
992*4882a593Smuzhiyun 			unlock_page(page);
993*4882a593Smuzhiyun 			put_page(page);
994*4882a593Smuzhiyun 		}
995*4882a593Smuzhiyun 	}
996*4882a593Smuzhiyun 	if (start >= end)
997*4882a593Smuzhiyun 		return;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	index = start;
1000*4882a593Smuzhiyun 	while (index < end) {
1001*4882a593Smuzhiyun 		cond_resched();
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 		pvec.nr = find_get_entries(mapping, index,
1004*4882a593Smuzhiyun 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
1005*4882a593Smuzhiyun 				pvec.pages, indices);
1006*4882a593Smuzhiyun 		if (!pvec.nr) {
1007*4882a593Smuzhiyun 			/* If all gone or hole-punch or unfalloc, we're done */
1008*4882a593Smuzhiyun 			if (index == start || end != -1)
1009*4882a593Smuzhiyun 				break;
1010*4882a593Smuzhiyun 			/* But if truncating, restart to make sure all gone */
1011*4882a593Smuzhiyun 			index = start;
1012*4882a593Smuzhiyun 			continue;
1013*4882a593Smuzhiyun 		}
1014*4882a593Smuzhiyun 		for (i = 0; i < pagevec_count(&pvec); i++) {
1015*4882a593Smuzhiyun 			struct page *page = pvec.pages[i];
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 			index = indices[i];
1018*4882a593Smuzhiyun 			if (index >= end)
1019*4882a593Smuzhiyun 				break;
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 			if (xa_is_value(page)) {
1022*4882a593Smuzhiyun 				if (unfalloc)
1023*4882a593Smuzhiyun 					continue;
1024*4882a593Smuzhiyun 				if (shmem_free_swap(mapping, index, page)) {
1025*4882a593Smuzhiyun 					/* Swap was replaced by page: retry */
1026*4882a593Smuzhiyun 					index--;
1027*4882a593Smuzhiyun 					break;
1028*4882a593Smuzhiyun 				}
1029*4882a593Smuzhiyun 				nr_swaps_freed++;
1030*4882a593Smuzhiyun 				continue;
1031*4882a593Smuzhiyun 			}
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 			lock_page(page);
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 			if (!unfalloc || !PageUptodate(page)) {
1036*4882a593Smuzhiyun 				if (page_mapping(page) != mapping) {
1037*4882a593Smuzhiyun 					/* Page was replaced by swap: retry */
1038*4882a593Smuzhiyun 					unlock_page(page);
1039*4882a593Smuzhiyun 					index--;
1040*4882a593Smuzhiyun 					break;
1041*4882a593Smuzhiyun 				}
1042*4882a593Smuzhiyun 				VM_BUG_ON_PAGE(PageWriteback(page), page);
1043*4882a593Smuzhiyun 				if (shmem_punch_compound(page, start, end))
1044*4882a593Smuzhiyun 					truncate_inode_page(mapping, page);
1045*4882a593Smuzhiyun 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1046*4882a593Smuzhiyun 					/* Wipe the page and don't get stuck */
1047*4882a593Smuzhiyun 					clear_highpage(page);
1048*4882a593Smuzhiyun 					flush_dcache_page(page);
1049*4882a593Smuzhiyun 					set_page_dirty(page);
1050*4882a593Smuzhiyun 					if (index <
1051*4882a593Smuzhiyun 					    round_up(start, HPAGE_PMD_NR))
1052*4882a593Smuzhiyun 						start = index + 1;
1053*4882a593Smuzhiyun 				}
1054*4882a593Smuzhiyun 			}
1055*4882a593Smuzhiyun 			unlock_page(page);
1056*4882a593Smuzhiyun 		}
1057*4882a593Smuzhiyun 		pagevec_remove_exceptionals(&pvec);
1058*4882a593Smuzhiyun 		pagevec_release(&pvec);
1059*4882a593Smuzhiyun 		index++;
1060*4882a593Smuzhiyun 	}
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	spin_lock_irq(&info->lock);
1063*4882a593Smuzhiyun 	info->swapped -= nr_swaps_freed;
1064*4882a593Smuzhiyun 	shmem_recalc_inode(inode);
1065*4882a593Smuzhiyun 	spin_unlock_irq(&info->lock);
1066*4882a593Smuzhiyun }
1067*4882a593Smuzhiyun 
shmem_truncate_range(struct inode * inode,loff_t lstart,loff_t lend)1068*4882a593Smuzhiyun void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1069*4882a593Smuzhiyun {
1070*4882a593Smuzhiyun 	shmem_undo_range(inode, lstart, lend, false);
1071*4882a593Smuzhiyun 	inode->i_ctime = inode->i_mtime = current_time(inode);
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);

	if (is_huge_enabled(sb_info))
		stat->blksize = HPAGE_PMD_SIZE;

	return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				/*
				 * _careful to defend against unlocked access to
				 * ->shrinklist in shmem_unused_huge_shrink()
				 */
				if (list_empty_careful(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}
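
/*
 * Illustrative userspace sketch (assumed usage, not code from this file):
 * the seal checks above are what make a sealed memfd refuse resizing:
 *
 *	int fd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);
 *	ftruncate(fd, 8192);	// now fails: EPERM from shmem_setattr()
 */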

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

extern struct swap_info_struct *swap_info[];

static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, unsigned int nr_entries,
				   struct page **entries, pgoff_t *indices,
				   unsigned int type, bool frontswap)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	swp_entry_t entry;
	unsigned int ret = 0;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xas_retry(&xas, page))
			continue;

		if (!xa_is_value(page))
			continue;

		entry = radix_to_swp_entry(page);
		if (swp_type(entry) != type)
			continue;
		if (frontswap &&
		    !frontswap_test(swap_info[type], swp_offset(entry)))
			continue;

		indices[ret] = xas.xa_index;
		entries[ret] = page;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();

	return ret;
}
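
/*
 * Background sketch (hedged summary, not code from this function): a
 * swapped-out shmem page is represented in the mapping's xarray by a
 * "value" entry encoding its swp_entry_t, which is why the scan above
 * can filter by swap device without touching any struct page.  The
 * round trip between the two forms looks like:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	void *val = swp_to_radix_entry(entry);	// xa_is_value(val) is true
 *	entry = radix_to_swp_entry(val);	// recovers (type, offset)
 */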

/*
 * Move the swapped pages for an inode to page cache. Returns the count
 * of pages swapped in, or the error in case of failure.
 */
static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
				    pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < pvec.nr; i++) {
		struct page *page = pvec.pages[i];

		if (!xa_is_value(page))
			continue;
		error = shmem_swapin_page(inode, indices[i],
					  &page, SGP_CACHE,
					  mapping_gfp_mask(mapping),
					  NULL, NULL);
		if (error == 0) {
			unlock_page(page);
			put_page(page);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type,
			     bool frontswap, unsigned long *fs_pages_to_unuse)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
	int ret = 0;

	pagevec_init(&pvec);
	do {
		unsigned int nr_entries = PAGEVEC_SIZE;

		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
			nr_entries = *fs_pages_to_unuse;

		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
						  pvec.pages, indices,
						  type, frontswap);
		if (pvec.nr == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, pvec, indices);
		if (ret < 0)
			break;

		if (frontswap_partial) {
			*fs_pages_to_unuse -= ret;
			if (*fs_pages_to_unuse == 0) {
				ret = FRONTSWAP_PAGES_UNUSED;
				break;
			}
		}

		start = indices[pvec.nr - 1];
	} while (true);

	return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type, bool frontswap,
		unsigned long *fs_pages_to_unuse)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
					  fs_pages_to_unuse);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}
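
/*
 * Hedged caller sketch (recalled from mm/swapfile.c, simplified): the
 * swapoff path's try_to_unuse() is the expected caller, roughly
 *
 *	pages_to_unuse = frontswap ? pages : 0;
 *	retval = shmem_unuse(type, frontswap, &pages_to_unuse);
 *
 * repeating until no shmem inode still holds swap on device 'type';
 * FRONTSWAP_PAGES_UNUSED propagates up when a partial frontswap quota
 * has been met.
 */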

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	trace_android_vh_set_shmem_page_flag(page);
	swap = get_swap_page(page);
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		info->swapped++;
		spin_unlock_irq(&info->lock);

		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_page(page, swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
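
/*
 * Hedged note on the redirty contract (reclaim-side behaviour recalled
 * from mm/vmscan.c, not code in this file): pageout() treats the return
 * value roughly as
 *
 *	res = mapping->a_ops->writepage(page, &wbc);
 *	if (res == AOP_WRITEPAGE_ACTIVATE) {
 *		ClearPageReclaim(page);
 *		return PAGE_ACTIVATE;	// page kept, still locked here
 *	}
 *
 * which is why the redirty path above may return with the page locked.
 */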

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}
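
/*
 * Illustrative example (assumed mount options): a tmpfs mounted with
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * shows ",mpol=interleave:0-3" in /proc/mounts via the seq_printf()
 * above; mpol_to_str() determines the exact mode string.
 */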

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */
#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif

static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
		struct shmem_inode_info *info, pgoff_t index)
{
	/* Create a pseudo vma that just contains the policy */
	vma_init(vma, NULL);
	/* Bias interleave by inode number to distribute better across nodes */
	vma->vm_pgoff = index + info->vfs_inode.i_ino;
	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(vma->vm_policy);
}

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;
	struct vm_fault vmf = {
		.vma = &pvma,
	};

	shmem_pseudo_vma_init(&pvma, info, index);
	page = swap_cluster_readahead(swap, gfp, &vmf);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}

static struct page *shmem_alloc_hugepage(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t hindex;
	struct page *page;

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
	shmem_pseudo_vma_destroy(&pvma);
	if (page)
		prep_transhuge_page(page);
	else
		count_vm_event(THP_FILE_FALLBACK);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page = NULL;

	trace_android_vh_shmem_alloc_page(&page);
	if (page)
		return page;

	shmem_pseudo_vma_init(&pvma, info, index);
	page = alloc_page_vma(gfp, &pvma, 0);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}

static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
		struct inode *inode,
		pgoff_t index, bool huge)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct page *page;
	int nr;
	int err = -ENOSPC;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		huge = false;
	nr = huge ? HPAGE_PMD_NR : 1;

	if (!shmem_inode_acct_block(inode, nr))
		goto failed;

	if (huge)
		page = shmem_alloc_hugepage(gfp, info, index);
	else
		page = shmem_alloc_page(gfp, info, index);
	if (page) {
		__SetPageLocked(page);
		__SetPageSwapBacked(page);
		return page;
	}

	err = -ENOMEM;
	shmem_inode_unacct_blocks(inode, nr);
failed:
	return ERR_PTR(err);
}
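
/*
 * Hedged usage sketch: callers must distinguish the two failures above
 * (mirroring what shmem_getpage_gfp() does below):
 *
 *	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
 *	if (IS_ERR(page)) {
 *		error = PTR_ERR(page);
 *		if (error == -ENOSPC)
 *			// over the inode/sb block limit: try to shrink a
 *			// huge page, or fall back to a small page and retry
 *		else
 *			// -ENOMEM: the allocator itself failed; give up
 *	}
 */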

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}
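
/*
 * Worked example (illustrative): suppose the mapping was created with
 * gfp limited to GFP_KERNEL | __GFP_DMA32, so gfp_zone(gfp) is
 * ZONE_DMA32, but the page was read in earlier to ZONE_NORMAL.  Then
 * page_zonenum(page) > gfp_zone(gfp) and the page must be copied down
 * before entering this mapping - exactly the gma500 below-4GB case the
 * comment above describes.
 */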

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	swp_entry_t entry;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	entry.val = page_private(oldpage);
	swap_index = swp_offset(entry);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	get_page(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__SetPageLocked(newpage);
	__SetPageSwapBacked(newpage);
	SetPageUptodate(newpage);
	set_page_private(newpage, entry.val);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	xa_lock_irq(&swap_mapping->i_pages);
	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
	if (!error) {
		mem_cgroup_migrate(oldpage, newpage);
		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
	}
	xa_unlock_irq(&swap_mapping->i_pages);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		lru_cache_add(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	put_page(oldpage);
	put_page(oldpage);
	return error;
}

/*
 * Swap in the page pointed to by *pagep.
 * Caller has to make sure that *pagep contains a valid swapped page.
 * Returns 0 and the page in pagep if success. On failure, returns the
 * error code and NULL in *pagep.
 */
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
	struct page *page;
	swp_entry_t swap;
	int error;

	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
	swap = radix_to_swp_entry(*pagep);
	*pagep = NULL;

	/* Look it up and read it in.. */
	page = lookup_swap_cache(swap, NULL, 0);
	if (!page) {
		/* Or update major stats only when swapin succeeds?? */
		if (fault_type) {
			*fault_type |= VM_FAULT_MAJOR;
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(charge_mm, PGMAJFAULT);
		}
		/* Here we actually start the io */
		page = shmem_swapin(swap, gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto failed;
		}
	}

	/* We have to do this with page locked to prevent races */
	lock_page(page);
	if (!PageSwapCache(page) || page_private(page) != swap.val ||
	    !shmem_confirm_swap(mapping, index, swap)) {
		error = -EEXIST;
		goto unlock;
	}
	if (!PageUptodate(page)) {
		error = -EIO;
		goto failed;
	}
	wait_on_page_writeback(page);

	/*
	 * Some architectures may have to restore extra metadata to the
	 * physical page after reading from swap.
	 */
	arch_swap_restore(swap, page);

	if (shmem_should_replace_page(page, gfp)) {
		error = shmem_replace_page(&page, gfp, info, index);
		if (error)
			goto failed;
	}

	error = shmem_add_to_page_cache(page, mapping, index,
					swp_to_radix_entry(swap), gfp,
					charge_mm);
	if (error)
		goto failed;

	spin_lock_irq(&info->lock);
	info->swapped--;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);

	if (sgp == SGP_WRITE)
		mark_page_accessed(page);

	delete_from_swap_cache(page);
	set_page_dirty(page);
	swap_free(swap);

	*pagep = page;
	return 0;
failed:
	if (!shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}

	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 *
 * vma, vmf, and fault_type are only supplied by shmem_fault:
 * otherwise they are NULL.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
	struct vm_area_struct *vma, struct vm_fault *vmf,
			vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct mm_struct *charge_mm;
	struct page *page;
	enum sgp_type sgp_huge = sgp;
	pgoff_t hindex = index;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
		return -EFBIG;
	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
		sgp = SGP_CACHE;
repeat:
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		return -EINVAL;
	}

	sbinfo = SHMEM_SB(inode->i_sb);
	charge_mm = vma ? vma->vm_mm : current->mm;

	page = find_lock_entry(mapping, index);

	if (page && vma && userfaultfd_minor(vma)) {
		if (!xa_is_value(page)) {
			unlock_page(page);
			put_page(page);
		}
		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
		return 0;
	}

	if (xa_is_value(page)) {
		error = shmem_swapin_page(inode, index, &page,
					  sgp, gfp, vma, fault_type);
		if (error == -EEXIST)
			goto repeat;

		*pagep = page;
		return error;
	}

	if (page)
		hindex = page->index;
	if (page && sgp == SGP_WRITE)
		mark_page_accessed(page);

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		put_page(page);
		page = NULL;
		hindex = index;
	}
	if (page || sgp == SGP_READ)
		goto out;

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */

	if (vma && userfaultfd_missing(vma)) {
		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
		return 0;
	}

	/* shmem_symlink() */
	if (mapping->a_ops != &shmem_aops)
		goto alloc_nohuge;
	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
		goto alloc_nohuge;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		goto alloc_huge;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		goto alloc_nohuge;
	case SHMEM_HUGE_WITHIN_SIZE: {
		loff_t i_size;
		pgoff_t off;

		off = round_up(index, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
		    i_size >> PAGE_SHIFT >= off)
			goto alloc_huge;

		fallthrough;
	}
	case SHMEM_HUGE_ADVISE:
		if (sgp_huge == SGP_HUGE)
			goto alloc_huge;
		/* TODO: implement fadvise() hints */
		goto alloc_nohuge;
	}

alloc_huge:
	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
	if (IS_ERR(page)) {
alloc_nohuge:
		page = shmem_alloc_and_acct_page(gfp, inode,
						 index, false);
	}
	if (IS_ERR(page)) {
		int retry = 5;

		error = PTR_ERR(page);
		page = NULL;
		if (error != -ENOSPC)
			goto unlock;
		/*
		 * Try to reclaim some space by splitting a huge page
		 * beyond i_size on the filesystem.
		 */
		while (retry--) {
			int ret;

			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
			if (ret == SHRINK_STOP)
				break;
			if (ret)
				goto alloc_nohuge;
		}
		goto unlock;
	}

	if (PageTransHuge(page))
		hindex = round_down(index, HPAGE_PMD_NR);
	else
		hindex = index;

	if (sgp == SGP_WRITE)
		__SetPageReferenced(page);

	error = shmem_add_to_page_cache(page, mapping, hindex,
					NULL, gfp & GFP_RECLAIM_MASK,
					charge_mm);
	if (error)
		goto unacct;
	lru_cache_add(page);

	spin_lock_irq(&info->lock);
	info->alloced += compound_nr(page);
	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
	alloced = true;

	if (PageTransHuge(page) &&
	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
			hindex + HPAGE_PMD_NR - 1) {
		/*
		 * Part of the huge page is beyond i_size: subject
		 * to shrink under memory pressure.
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		/*
		 * _careful to defend against unlocked access to
		 * ->shrinklist in shmem_unused_huge_shrink()
		 */
		if (list_empty_careful(&info->shrinklist)) {
			list_add_tail(&info->shrinklist,
				      &sbinfo->shrinklist);
			sbinfo->shrinklist_len++;
		}
		spin_unlock(&sbinfo->shrinklist_lock);
	}

	/*
	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
	 */
	if (sgp == SGP_FALLOC)
		sgp = SGP_WRITE;
clear:
	/*
	 * Let SGP_WRITE caller clear ends if write does not fill page;
	 * but SGP_FALLOC on a page fallocated earlier must initialize
	 * it now, lest undo on failure cancel our earlier guarantee.
	 */
	if (sgp != SGP_WRITE && !PageUptodate(page)) {
		int i;

		for (i = 0; i < compound_nr(page); i++) {
			clear_highpage(page + i);
			flush_dcache_page(page + i);
		}
		SetPageUptodate(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		if (alloced) {
			ClearPageDirty(page);
			delete_from_page_cache(page);
			spin_lock_irq(&info->lock);
			shmem_recalc_inode(inode);
			spin_unlock_irq(&info->lock);
		}
		error = -EINVAL;
		goto unlock;
	}
out:
	*pagep = page + index - hindex;
	return 0;

	/*
	 * Error recovery.
	 */
unacct:
	shmem_inode_unacct_blocks(inode, compound_nr(page));

	if (PageTransHuge(page)) {
		unlock_page(page);
		put_page(page);
		goto alloc_nohuge;
	}
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (error == -ENOSPC && !once++) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}
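
/*
 * Hedged usage sketch: most users reach this through the shmem_getpage()
 * helper defined earlier in this file, roughly:
 *
 *	struct page *page;
 *	int err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (!err) {
 *		// page is returned locked and uptodate
 *		unlock_page(page);
 *		put_page(page);
 *	}
 *
 * SGP_READ, SGP_CACHE, SGP_WRITE and SGP_FALLOC select how holes and
 * not-yet-initialized fallocated pages are treated, as handled above.
 */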

/*
 * This is like autoremove_wake_function, but it removes the wait queue
 * entry unconditionally - even if something else had already woken the
 * target.
 */
static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);
	list_del_init(&wait->entry);
	return ret;
}

static vm_fault_t shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
	enum sgp_type sgp;
	int err;
	vm_fault_t ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			struct file *fpin;
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);

			ret = VM_FAULT_NOPAGE;
			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
			if (fpin)
				ret = VM_FAULT_RETRY;

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);

			if (fpin)
				fput(fpin);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	sgp = SGP_CACHE;

	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		sgp = SGP_NOHUGE;
	else if (vma->vm_flags & VM_HUGEPAGE)
		sgp = SGP_HUGE;

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
				  gfp, vma, vmf, &ret);
	if (err)
		return vmf_error(err);
	return ret;
}

unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long uaddr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *,
		unsigned long, unsigned long, unsigned long, unsigned long);
	unsigned long addr;
	unsigned long offset;
	unsigned long inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	addr = get_area(file, uaddr, len, pgoff, flags);

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return addr;
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (shmem_huge == SHMEM_HUGE_DENY)
		return addr;
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint and we allocated area there
	 * successfully, respect that as before.
	 */
	if (uaddr == addr)
		return addr;

	if (shmem_huge != SHMEM_HUGE_FORCE) {
		struct super_block *sb;

		if (file) {
			VM_BUG_ON(file->f_op != &shmem_file_operations);
			sb = file_inode(file)->i_sb;
		} else {
			/*
			 * Called directly from mm/mmap.c, or drivers/char/mem.c
			 * for "/dev/zero", to create a shared anonymous object.
			 */
			if (IS_ERR(shm_mnt))
				return addr;
			sb = shm_mnt->mnt_sb;
		}
		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
			return addr;
	}

	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
		return addr;
	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
		return addr;

	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;

	if (inflated_addr > TASK_SIZE - len)
		return addr;
	return inflated_addr;
}
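
/*
 * Worked example (illustrative numbers only): with 2MB HPAGE_PMD_SIZE
 * and pgoff = 0x300, offset = (0x300 << PAGE_SHIFT) & 0x1fffff = 0x100000.
 * If the inflated search returns inflated_addr = 0x7f0000001000, then
 * inflated_offset = 0x1000 and the adjustment adds 0xff000, yielding
 * 0x7f0000100000: an address congruent to the file offset modulo 2MB,
 * so the mapping can be backed by PMD-sized huge pages.
 */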
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun #ifdef CONFIG_NUMA
2238*4882a593Smuzhiyun static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun 	struct inode *inode = file_inode(vma->vm_file);
2241*4882a593Smuzhiyun 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2242*4882a593Smuzhiyun }
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2245*4882a593Smuzhiyun 					  unsigned long addr)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun 	struct inode *inode = file_inode(vma->vm_file);
2248*4882a593Smuzhiyun 	pgoff_t index;
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2251*4882a593Smuzhiyun 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun #endif
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun int shmem_lock(struct file *file, int lock, struct user_struct *user)
2256*4882a593Smuzhiyun {
2257*4882a593Smuzhiyun 	struct inode *inode = file_inode(file);
2258*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
2259*4882a593Smuzhiyun 	int retval = -ENOMEM;
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 	/*
2262*4882a593Smuzhiyun 	 * What serializes the accesses to info->flags?
2263*4882a593Smuzhiyun 	 * ipc_lock_object() when called from shmctl_do_lock(),
2264*4882a593Smuzhiyun 	 * no serialization needed when called from shm_destroy().
2265*4882a593Smuzhiyun 	 */
2266*4882a593Smuzhiyun 	if (lock && !(info->flags & VM_LOCKED)) {
2267*4882a593Smuzhiyun 		if (!user_shm_lock(inode->i_size, user))
2268*4882a593Smuzhiyun 			goto out_nomem;
2269*4882a593Smuzhiyun 		info->flags |= VM_LOCKED;
2270*4882a593Smuzhiyun 		mapping_set_unevictable(file->f_mapping);
2271*4882a593Smuzhiyun 	}
2272*4882a593Smuzhiyun 	if (!lock && (info->flags & VM_LOCKED) && user) {
2273*4882a593Smuzhiyun 		user_shm_unlock(inode->i_size, user);
2274*4882a593Smuzhiyun 		info->flags &= ~VM_LOCKED;
2275*4882a593Smuzhiyun 		mapping_clear_unevictable(file->f_mapping);
2276*4882a593Smuzhiyun 	}
2277*4882a593Smuzhiyun 	retval = 0;
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun out_nomem:
2280*4882a593Smuzhiyun 	return retval;
2281*4882a593Smuzhiyun }
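
/*
 * Illustrative userspace path into shmem_lock() (a sketch, not part of
 * this file): shmctl(SHM_LOCK) pins a SysV segment, arriving here with
 * lock=1 under ipc_lock_object(); SHM_UNLOCK and segment destruction
 * arrive with lock=0:
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// needs CAP_IPC_LOCK or rlimit
 */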
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2284*4882a593Smuzhiyun {
2285*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2286*4882a593Smuzhiyun 	int ret;
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun 	ret = seal_check_future_write(info->seals, vma);
2289*4882a593Smuzhiyun 	if (ret)
2290*4882a593Smuzhiyun 		return ret;
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 	/* arm64 - allow memory tagging on RAM-based files */
2293*4882a593Smuzhiyun 	vma->vm_flags |= VM_MTE_ALLOWED;
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 	file_accessed(file);
2296*4882a593Smuzhiyun 	vma->vm_ops = &shmem_vm_ops;
2297*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2298*4882a593Smuzhiyun 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2299*4882a593Smuzhiyun 			(vma->vm_end & HPAGE_PMD_MASK)) {
2300*4882a593Smuzhiyun 		khugepaged_enter(vma, vma->vm_flags);
2301*4882a593Smuzhiyun 	}
2302*4882a593Smuzhiyun 	return 0;
2303*4882a593Smuzhiyun }
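
/*
 * The khugepaged_enter() condition above asks whether the vma covers at
 * least one whole PMD-aligned block: round vm_start up and vm_end down
 * to HPAGE_PMD_SIZE and compare. A sketch with assumed 2MB huge pages:
 *
 *	vm_start = 0x7f0000001000  rounds up to    0x7f0000200000
 *	vm_end   = 0x7f0000500000  rounds down to  0x7f0000400000
 *
 * 0x..200000 < 0x..400000, so a huge page fits and the mm is registered
 * with khugepaged.
 */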
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2306*4882a593Smuzhiyun 				     umode_t mode, dev_t dev, unsigned long flags)
2307*4882a593Smuzhiyun {
2308*4882a593Smuzhiyun 	struct inode *inode;
2309*4882a593Smuzhiyun 	struct shmem_inode_info *info;
2310*4882a593Smuzhiyun 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2311*4882a593Smuzhiyun 	ino_t ino;
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun 	if (shmem_reserve_inode(sb, &ino))
2314*4882a593Smuzhiyun 		return NULL;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	inode = new_inode(sb);
2317*4882a593Smuzhiyun 	if (inode) {
2318*4882a593Smuzhiyun 		inode->i_ino = ino;
2319*4882a593Smuzhiyun 		inode_init_owner(inode, dir, mode);
2320*4882a593Smuzhiyun 		inode->i_blocks = 0;
2321*4882a593Smuzhiyun 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2322*4882a593Smuzhiyun 		inode->i_generation = prandom_u32();
2323*4882a593Smuzhiyun 		info = SHMEM_I(inode);
2324*4882a593Smuzhiyun 		memset(info, 0, (char *)inode - (char *)info);
2325*4882a593Smuzhiyun 		spin_lock_init(&info->lock);
2326*4882a593Smuzhiyun 		atomic_set(&info->stop_eviction, 0);
2327*4882a593Smuzhiyun 		info->seals = F_SEAL_SEAL;
2328*4882a593Smuzhiyun 		info->flags = flags & VM_NORESERVE;
2329*4882a593Smuzhiyun 		INIT_LIST_HEAD(&info->shrinklist);
2330*4882a593Smuzhiyun 		INIT_LIST_HEAD(&info->swaplist);
2331*4882a593Smuzhiyun 		simple_xattrs_init(&info->xattrs);
2332*4882a593Smuzhiyun 		cache_no_acl(inode);
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 		switch (mode & S_IFMT) {
2335*4882a593Smuzhiyun 		default:
2336*4882a593Smuzhiyun 			inode->i_op = &shmem_special_inode_operations;
2337*4882a593Smuzhiyun 			init_special_inode(inode, mode, dev);
2338*4882a593Smuzhiyun 			break;
2339*4882a593Smuzhiyun 		case S_IFREG:
2340*4882a593Smuzhiyun 			inode->i_mapping->a_ops = &shmem_aops;
2341*4882a593Smuzhiyun 			inode->i_op = &shmem_inode_operations;
2342*4882a593Smuzhiyun 			inode->i_fop = &shmem_file_operations;
2343*4882a593Smuzhiyun 			mpol_shared_policy_init(&info->policy,
2344*4882a593Smuzhiyun 						 shmem_get_sbmpol(sbinfo));
2345*4882a593Smuzhiyun 			break;
2346*4882a593Smuzhiyun 		case S_IFDIR:
2347*4882a593Smuzhiyun 			inc_nlink(inode);
2348*4882a593Smuzhiyun 			/* Some things misbehave if size == 0 on a directory */
2349*4882a593Smuzhiyun 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
2350*4882a593Smuzhiyun 			inode->i_op = &shmem_dir_inode_operations;
2351*4882a593Smuzhiyun 			inode->i_fop = &simple_dir_operations;
2352*4882a593Smuzhiyun 			break;
2353*4882a593Smuzhiyun 		case S_IFLNK:
2354*4882a593Smuzhiyun 			/*
2355*4882a593Smuzhiyun 			 * Must not load anything in the rbtree,
2356*4882a593Smuzhiyun 			 * mpol_free_shared_policy will not be called.
2357*4882a593Smuzhiyun 			 */
2358*4882a593Smuzhiyun 			mpol_shared_policy_init(&info->policy, NULL);
2359*4882a593Smuzhiyun 			break;
2360*4882a593Smuzhiyun 		}
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun 		lockdep_annotate_inode_mutex_key(inode);
2363*4882a593Smuzhiyun 	} else
2364*4882a593Smuzhiyun 		shmem_free_inode(sb);
2365*4882a593Smuzhiyun 	return inode;
2366*4882a593Smuzhiyun }
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun bool shmem_mapping(struct address_space *mapping)
2369*4882a593Smuzhiyun {
2370*4882a593Smuzhiyun 	return mapping->a_ops == &shmem_aops;
2371*4882a593Smuzhiyun }
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun #ifdef CONFIG_USERFAULTFD
2374*4882a593Smuzhiyun int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2375*4882a593Smuzhiyun 			   pmd_t *dst_pmd,
2376*4882a593Smuzhiyun 			   struct vm_area_struct *dst_vma,
2377*4882a593Smuzhiyun 			   unsigned long dst_addr,
2378*4882a593Smuzhiyun 			   unsigned long src_addr,
2379*4882a593Smuzhiyun 			   bool zeropage,
2380*4882a593Smuzhiyun 			   struct page **pagep)
2381*4882a593Smuzhiyun {
2382*4882a593Smuzhiyun 	struct inode *inode = file_inode(dst_vma->vm_file);
2383*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
2384*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
2385*4882a593Smuzhiyun 	gfp_t gfp = mapping_gfp_mask(mapping);
2386*4882a593Smuzhiyun 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2387*4882a593Smuzhiyun 	void *page_kaddr;
2388*4882a593Smuzhiyun 	struct page *page;
2389*4882a593Smuzhiyun 	int ret;
2390*4882a593Smuzhiyun 	pgoff_t max_off;
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	if (!shmem_inode_acct_block(inode, 1)) {
2393*4882a593Smuzhiyun 		/*
2394*4882a593Smuzhiyun 		 * We may have got a page, returned -ENOENT triggering a retry,
2395*4882a593Smuzhiyun 		 * and now we find ourselves with -ENOMEM. Release the page, to
2396*4882a593Smuzhiyun 		 * avoid a BUG_ON in our caller.
2397*4882a593Smuzhiyun 		 */
2398*4882a593Smuzhiyun 		if (unlikely(*pagep)) {
2399*4882a593Smuzhiyun 			put_page(*pagep);
2400*4882a593Smuzhiyun 			*pagep = NULL;
2401*4882a593Smuzhiyun 		}
2402*4882a593Smuzhiyun 		return -ENOMEM;
2403*4882a593Smuzhiyun 	}
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 	if (!*pagep) {
2406*4882a593Smuzhiyun 		ret = -ENOMEM;
2407*4882a593Smuzhiyun 		page = shmem_alloc_page(gfp, info, pgoff);
2408*4882a593Smuzhiyun 		if (!page)
2409*4882a593Smuzhiyun 			goto out_unacct_blocks;
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 		if (!zeropage) {	/* COPY */
2412*4882a593Smuzhiyun 			page_kaddr = kmap_atomic(page);
2413*4882a593Smuzhiyun 			ret = copy_from_user(page_kaddr,
2414*4882a593Smuzhiyun 					     (const void __user *)src_addr,
2415*4882a593Smuzhiyun 					     PAGE_SIZE);
2416*4882a593Smuzhiyun 			kunmap_atomic(page_kaddr);
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 			/* fallback to copy_from_user outside mmap_lock */
2419*4882a593Smuzhiyun 			if (unlikely(ret)) {
2420*4882a593Smuzhiyun 				*pagep = page;
2421*4882a593Smuzhiyun 				ret = -ENOENT;
2422*4882a593Smuzhiyun 				/* don't free the page */
2423*4882a593Smuzhiyun 				goto out_unacct_blocks;
2424*4882a593Smuzhiyun 			}
2425*4882a593Smuzhiyun 		} else {		/* ZEROPAGE */
2426*4882a593Smuzhiyun 			clear_highpage(page);
2427*4882a593Smuzhiyun 		}
2428*4882a593Smuzhiyun 	} else {
2429*4882a593Smuzhiyun 		page = *pagep;
2430*4882a593Smuzhiyun 		*pagep = NULL;
2431*4882a593Smuzhiyun 	}
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun 	VM_BUG_ON(PageLocked(page));
2434*4882a593Smuzhiyun 	VM_BUG_ON(PageSwapBacked(page));
2435*4882a593Smuzhiyun 	__SetPageLocked(page);
2436*4882a593Smuzhiyun 	__SetPageSwapBacked(page);
2437*4882a593Smuzhiyun 	__SetPageUptodate(page);
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	ret = -EFAULT;
2440*4882a593Smuzhiyun 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2441*4882a593Smuzhiyun 	if (unlikely(pgoff >= max_off))
2442*4882a593Smuzhiyun 		goto out_release;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2445*4882a593Smuzhiyun 				      gfp & GFP_RECLAIM_MASK, dst_mm);
2446*4882a593Smuzhiyun 	if (ret)
2447*4882a593Smuzhiyun 		goto out_release;
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
2450*4882a593Smuzhiyun 				       page, true, false);
2451*4882a593Smuzhiyun 	if (ret)
2452*4882a593Smuzhiyun 		goto out_delete_from_cache;
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	spin_lock_irq(&info->lock);
2455*4882a593Smuzhiyun 	info->alloced++;
2456*4882a593Smuzhiyun 	inode->i_blocks += BLOCKS_PER_PAGE;
2457*4882a593Smuzhiyun 	shmem_recalc_inode(inode);
2458*4882a593Smuzhiyun 	spin_unlock_irq(&info->lock);
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	SetPageDirty(page);
2461*4882a593Smuzhiyun 	unlock_page(page);
2462*4882a593Smuzhiyun 	return 0;
2463*4882a593Smuzhiyun out_delete_from_cache:
2464*4882a593Smuzhiyun 	delete_from_page_cache(page);
2465*4882a593Smuzhiyun out_release:
2466*4882a593Smuzhiyun 	unlock_page(page);
2467*4882a593Smuzhiyun 	put_page(page);
2468*4882a593Smuzhiyun out_unacct_blocks:
2469*4882a593Smuzhiyun 	shmem_inode_unacct_blocks(inode, 1);
2470*4882a593Smuzhiyun 	return ret;
2471*4882a593Smuzhiyun }
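
/*
 * Sketch of the retry protocol with the caller (illustrative): for
 * UFFDIO_COPY this is first called with *pagep == NULL; if the inline
 * copy_from_user() faults while mmap_lock is held, the new page is
 * stashed in *pagep and -ENOENT returned, the caller finishes the copy
 * outside the lock and calls back in with the pre-filled page.
 */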
2472*4882a593Smuzhiyun #endif /* CONFIG_USERFAULTFD */
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun #ifdef CONFIG_TMPFS
2475*4882a593Smuzhiyun static const struct inode_operations shmem_symlink_inode_operations;
2476*4882a593Smuzhiyun static const struct inode_operations shmem_short_symlink_operations;
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun #ifdef CONFIG_TMPFS_XATTR
2479*4882a593Smuzhiyun static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2480*4882a593Smuzhiyun #else
2481*4882a593Smuzhiyun #define shmem_initxattrs NULL
2482*4882a593Smuzhiyun #endif
2483*4882a593Smuzhiyun 
2484*4882a593Smuzhiyun static int
2485*4882a593Smuzhiyun shmem_write_begin(struct file *file, struct address_space *mapping,
2486*4882a593Smuzhiyun 			loff_t pos, unsigned len, unsigned flags,
2487*4882a593Smuzhiyun 			struct page **pagep, void **fsdata)
2488*4882a593Smuzhiyun {
2489*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2490*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
2491*4882a593Smuzhiyun 	pgoff_t index = pos >> PAGE_SHIFT;
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	/* i_mutex is held by caller */
2494*4882a593Smuzhiyun 	if (unlikely(info->seals & (F_SEAL_GROW |
2495*4882a593Smuzhiyun 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2496*4882a593Smuzhiyun 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2497*4882a593Smuzhiyun 			return -EPERM;
2498*4882a593Smuzhiyun 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2499*4882a593Smuzhiyun 			return -EPERM;
2500*4882a593Smuzhiyun 	}
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2503*4882a593Smuzhiyun }
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun static int
2506*4882a593Smuzhiyun shmem_write_end(struct file *file, struct address_space *mapping,
2507*4882a593Smuzhiyun 			loff_t pos, unsigned len, unsigned copied,
2508*4882a593Smuzhiyun 			struct page *page, void *fsdata)
2509*4882a593Smuzhiyun {
2510*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	if (pos + copied > inode->i_size)
2513*4882a593Smuzhiyun 		i_size_write(inode, pos + copied);
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun 	if (!PageUptodate(page)) {
2516*4882a593Smuzhiyun 		struct page *head = compound_head(page);
2517*4882a593Smuzhiyun 		if (PageTransCompound(page)) {
2518*4882a593Smuzhiyun 			int i;
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2521*4882a593Smuzhiyun 				if (head + i == page)
2522*4882a593Smuzhiyun 					continue;
2523*4882a593Smuzhiyun 				clear_highpage(head + i);
2524*4882a593Smuzhiyun 				flush_dcache_page(head + i);
2525*4882a593Smuzhiyun 			}
2526*4882a593Smuzhiyun 		}
2527*4882a593Smuzhiyun 		if (copied < PAGE_SIZE) {
2528*4882a593Smuzhiyun 			unsigned from = pos & (PAGE_SIZE - 1);
2529*4882a593Smuzhiyun 			zero_user_segments(page, 0, from,
2530*4882a593Smuzhiyun 					from + copied, PAGE_SIZE);
2531*4882a593Smuzhiyun 		}
2532*4882a593Smuzhiyun 		SetPageUptodate(head);
2533*4882a593Smuzhiyun 	}
2534*4882a593Smuzhiyun 	set_page_dirty(page);
2535*4882a593Smuzhiyun 	unlock_page(page);
2536*4882a593Smuzhiyun 	put_page(page);
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	return copied;
2539*4882a593Smuzhiyun }
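
/*
 * Note on the !PageUptodate path above (sketch): for a freshly
 * allocated page that was only partially written, the bytes outside
 * [from, from + copied) are zeroed before SetPageUptodate(), so later
 * reads of the untouched region see zeroes rather than uninitialized
 * data; for a huge page the sibling subpages are cleared as well.
 */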
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2542*4882a593Smuzhiyun {
2543*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
2544*4882a593Smuzhiyun 	struct inode *inode = file_inode(file);
2545*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
2546*4882a593Smuzhiyun 	pgoff_t index;
2547*4882a593Smuzhiyun 	unsigned long offset;
2548*4882a593Smuzhiyun 	enum sgp_type sgp = SGP_READ;
2549*4882a593Smuzhiyun 	int error = 0;
2550*4882a593Smuzhiyun 	ssize_t retval = 0;
2551*4882a593Smuzhiyun 	loff_t *ppos = &iocb->ki_pos;
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	/*
2554*4882a593Smuzhiyun 	 * Might this read be for a stacking filesystem?  Then when reading
2555*4882a593Smuzhiyun 	 * holes of a sparse file, we actually need to allocate those pages,
2556*4882a593Smuzhiyun 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2557*4882a593Smuzhiyun 	 */
2558*4882a593Smuzhiyun 	if (!iter_is_iovec(to))
2559*4882a593Smuzhiyun 		sgp = SGP_CACHE;
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun 	index = *ppos >> PAGE_SHIFT;
2562*4882a593Smuzhiyun 	offset = *ppos & ~PAGE_MASK;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	for (;;) {
2565*4882a593Smuzhiyun 		struct page *page = NULL;
2566*4882a593Smuzhiyun 		pgoff_t end_index;
2567*4882a593Smuzhiyun 		unsigned long nr, ret;
2568*4882a593Smuzhiyun 		loff_t i_size = i_size_read(inode);
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun 		end_index = i_size >> PAGE_SHIFT;
2571*4882a593Smuzhiyun 		if (index > end_index)
2572*4882a593Smuzhiyun 			break;
2573*4882a593Smuzhiyun 		if (index == end_index) {
2574*4882a593Smuzhiyun 			nr = i_size & ~PAGE_MASK;
2575*4882a593Smuzhiyun 			if (nr <= offset)
2576*4882a593Smuzhiyun 				break;
2577*4882a593Smuzhiyun 		}
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 		error = shmem_getpage(inode, index, &page, sgp);
2580*4882a593Smuzhiyun 		if (error) {
2581*4882a593Smuzhiyun 			if (error == -EINVAL)
2582*4882a593Smuzhiyun 				error = 0;
2583*4882a593Smuzhiyun 			break;
2584*4882a593Smuzhiyun 		}
2585*4882a593Smuzhiyun 		if (page) {
2586*4882a593Smuzhiyun 			if (sgp == SGP_CACHE)
2587*4882a593Smuzhiyun 				set_page_dirty(page);
2588*4882a593Smuzhiyun 			unlock_page(page);
2589*4882a593Smuzhiyun 		}
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun 		/*
2592*4882a593Smuzhiyun 		 * We must evaluate after, since reads (unlike writes)
2593*4882a593Smuzhiyun 		 * are called without i_mutex protection against truncate
2594*4882a593Smuzhiyun 		 */
2595*4882a593Smuzhiyun 		nr = PAGE_SIZE;
2596*4882a593Smuzhiyun 		i_size = i_size_read(inode);
2597*4882a593Smuzhiyun 		end_index = i_size >> PAGE_SHIFT;
2598*4882a593Smuzhiyun 		if (index == end_index) {
2599*4882a593Smuzhiyun 			nr = i_size & ~PAGE_MASK;
2600*4882a593Smuzhiyun 			if (nr <= offset) {
2601*4882a593Smuzhiyun 				if (page)
2602*4882a593Smuzhiyun 					put_page(page);
2603*4882a593Smuzhiyun 				break;
2604*4882a593Smuzhiyun 			}
2605*4882a593Smuzhiyun 		}
2606*4882a593Smuzhiyun 		nr -= offset;
2607*4882a593Smuzhiyun 
2608*4882a593Smuzhiyun 		if (page) {
2609*4882a593Smuzhiyun 			/*
2610*4882a593Smuzhiyun 			 * If users can be writing to this page using arbitrary
2611*4882a593Smuzhiyun 			 * virtual addresses, take care about potential aliasing
2612*4882a593Smuzhiyun 			 * before reading the page on the kernel side.
2613*4882a593Smuzhiyun 			 */
2614*4882a593Smuzhiyun 			if (mapping_writably_mapped(mapping))
2615*4882a593Smuzhiyun 				flush_dcache_page(page);
2616*4882a593Smuzhiyun 			/*
2617*4882a593Smuzhiyun 			 * Mark the page accessed if we read the beginning.
2618*4882a593Smuzhiyun 			 */
2619*4882a593Smuzhiyun 			if (!offset)
2620*4882a593Smuzhiyun 				mark_page_accessed(page);
2621*4882a593Smuzhiyun 		} else {
2622*4882a593Smuzhiyun 			page = ZERO_PAGE(0);
2623*4882a593Smuzhiyun 			get_page(page);
2624*4882a593Smuzhiyun 		}
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun 		/*
2627*4882a593Smuzhiyun 		 * Ok, we have the page, and it's up-to-date, so
2628*4882a593Smuzhiyun 		 * now we can copy it to user space...
2629*4882a593Smuzhiyun 		 */
2630*4882a593Smuzhiyun 		ret = copy_page_to_iter(page, offset, nr, to);
2631*4882a593Smuzhiyun 		retval += ret;
2632*4882a593Smuzhiyun 		offset += ret;
2633*4882a593Smuzhiyun 		index += offset >> PAGE_SHIFT;
2634*4882a593Smuzhiyun 		offset &= ~PAGE_MASK;
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 		put_page(page);
2637*4882a593Smuzhiyun 		if (!iov_iter_count(to))
2638*4882a593Smuzhiyun 			break;
2639*4882a593Smuzhiyun 		if (ret < nr) {
2640*4882a593Smuzhiyun 			error = -EFAULT;
2641*4882a593Smuzhiyun 			break;
2642*4882a593Smuzhiyun 		}
2643*4882a593Smuzhiyun 		cond_resched();
2644*4882a593Smuzhiyun 	}
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2647*4882a593Smuzhiyun 	file_accessed(file);
2648*4882a593Smuzhiyun 	return retval ? retval : error;
2649*4882a593Smuzhiyun }
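
/*
 * Note on holes (sketch): with SGP_READ, shmem_getpage() leaves *page
 * NULL over a hole, and the loop above substitutes ZERO_PAGE(0), so
 * reading a sparse tmpfs file copies zeroes without instantiating
 * pages -- except for the SGP_CACHE case flagged for stacking callers.
 */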
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun /*
2652*4882a593Smuzhiyun  * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2653*4882a593Smuzhiyun  */
2654*4882a593Smuzhiyun static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2655*4882a593Smuzhiyun 				    pgoff_t index, pgoff_t end, int whence)
2656*4882a593Smuzhiyun {
2657*4882a593Smuzhiyun 	struct page *page;
2658*4882a593Smuzhiyun 	struct pagevec pvec;
2659*4882a593Smuzhiyun 	pgoff_t indices[PAGEVEC_SIZE];
2660*4882a593Smuzhiyun 	bool done = false;
2661*4882a593Smuzhiyun 	int i;
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	pagevec_init(&pvec);
2664*4882a593Smuzhiyun 	pvec.nr = 1;		/* start small: we may be there already */
2665*4882a593Smuzhiyun 	while (!done) {
2666*4882a593Smuzhiyun 		pvec.nr = find_get_entries(mapping, index,
2667*4882a593Smuzhiyun 					pvec.nr, pvec.pages, indices);
2668*4882a593Smuzhiyun 		if (!pvec.nr) {
2669*4882a593Smuzhiyun 			if (whence == SEEK_DATA)
2670*4882a593Smuzhiyun 				index = end;
2671*4882a593Smuzhiyun 			break;
2672*4882a593Smuzhiyun 		}
2673*4882a593Smuzhiyun 		for (i = 0; i < pvec.nr; i++, index++) {
2674*4882a593Smuzhiyun 			if (index < indices[i]) {
2675*4882a593Smuzhiyun 				if (whence == SEEK_HOLE) {
2676*4882a593Smuzhiyun 					done = true;
2677*4882a593Smuzhiyun 					break;
2678*4882a593Smuzhiyun 				}
2679*4882a593Smuzhiyun 				index = indices[i];
2680*4882a593Smuzhiyun 			}
2681*4882a593Smuzhiyun 			page = pvec.pages[i];
2682*4882a593Smuzhiyun 			if (page && !xa_is_value(page)) {
2683*4882a593Smuzhiyun 				if (!PageUptodate(page))
2684*4882a593Smuzhiyun 					page = NULL;
2685*4882a593Smuzhiyun 			}
2686*4882a593Smuzhiyun 			if (index >= end ||
2687*4882a593Smuzhiyun 			    (page && whence == SEEK_DATA) ||
2688*4882a593Smuzhiyun 			    (!page && whence == SEEK_HOLE)) {
2689*4882a593Smuzhiyun 				done = true;
2690*4882a593Smuzhiyun 				break;
2691*4882a593Smuzhiyun 			}
2692*4882a593Smuzhiyun 		}
2693*4882a593Smuzhiyun 		pagevec_remove_exceptionals(&pvec);
2694*4882a593Smuzhiyun 		pagevec_release(&pvec);
2695*4882a593Smuzhiyun 		pvec.nr = PAGEVEC_SIZE;
2696*4882a593Smuzhiyun 		cond_resched();
2697*4882a593Smuzhiyun 	}
2698*4882a593Smuzhiyun 	return index;
2699*4882a593Smuzhiyun }
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2702*4882a593Smuzhiyun {
2703*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
2704*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2705*4882a593Smuzhiyun 	pgoff_t start, end;
2706*4882a593Smuzhiyun 	loff_t new_offset;
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2709*4882a593Smuzhiyun 		return generic_file_llseek_size(file, offset, whence,
2710*4882a593Smuzhiyun 					MAX_LFS_FILESIZE, i_size_read(inode));
2711*4882a593Smuzhiyun 	inode_lock(inode);
2712*4882a593Smuzhiyun 	/* We're holding i_mutex so we can access i_size directly */
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	if (offset < 0 || offset >= inode->i_size)
2715*4882a593Smuzhiyun 		offset = -ENXIO;
2716*4882a593Smuzhiyun 	else {
2717*4882a593Smuzhiyun 		start = offset >> PAGE_SHIFT;
2718*4882a593Smuzhiyun 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2719*4882a593Smuzhiyun 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2720*4882a593Smuzhiyun 		new_offset <<= PAGE_SHIFT;
2721*4882a593Smuzhiyun 		if (new_offset > offset) {
2722*4882a593Smuzhiyun 			if (new_offset < inode->i_size)
2723*4882a593Smuzhiyun 				offset = new_offset;
2724*4882a593Smuzhiyun 			else if (whence == SEEK_DATA)
2725*4882a593Smuzhiyun 				offset = -ENXIO;
2726*4882a593Smuzhiyun 			else
2727*4882a593Smuzhiyun 				offset = inode->i_size;
2728*4882a593Smuzhiyun 		}
2729*4882a593Smuzhiyun 	}
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun 	if (offset >= 0)
2732*4882a593Smuzhiyun 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2733*4882a593Smuzhiyun 	inode_unlock(inode);
2734*4882a593Smuzhiyun 	return offset;
2735*4882a593Smuzhiyun }
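
/*
 * Userspace view of the SEEK_DATA/SEEK_HOLE support above (illustrative
 * sketch; assumes a sparse tmpfs file with data only in its second
 * 4KB page):
 *
 *	off_t d = lseek(fd, 0, SEEK_DATA);	// d == 4096
 *	off_t h = lseek(fd, d, SEEK_HOLE);	// h == 8192
 *
 * An offset at or beyond i_size, or SEEK_DATA over a trailing hole,
 * fails with ENXIO.
 */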
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2738*4882a593Smuzhiyun 							 loff_t len)
2739*4882a593Smuzhiyun {
2740*4882a593Smuzhiyun 	struct inode *inode = file_inode(file);
2741*4882a593Smuzhiyun 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2742*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
2743*4882a593Smuzhiyun 	struct shmem_falloc shmem_falloc;
2744*4882a593Smuzhiyun 	pgoff_t start, index, end;
2745*4882a593Smuzhiyun 	int error;
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2748*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	inode_lock(inode);
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun 	if (mode & FALLOC_FL_PUNCH_HOLE) {
2753*4882a593Smuzhiyun 		struct address_space *mapping = file->f_mapping;
2754*4882a593Smuzhiyun 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
2755*4882a593Smuzhiyun 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2756*4882a593Smuzhiyun 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun 		/* protected by i_mutex */
2759*4882a593Smuzhiyun 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2760*4882a593Smuzhiyun 			error = -EPERM;
2761*4882a593Smuzhiyun 			goto out;
2762*4882a593Smuzhiyun 		}
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun 		shmem_falloc.waitq = &shmem_falloc_waitq;
2765*4882a593Smuzhiyun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2766*4882a593Smuzhiyun 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2767*4882a593Smuzhiyun 		spin_lock(&inode->i_lock);
2768*4882a593Smuzhiyun 		inode->i_private = &shmem_falloc;
2769*4882a593Smuzhiyun 		spin_unlock(&inode->i_lock);
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 		if ((u64)unmap_end > (u64)unmap_start)
2772*4882a593Smuzhiyun 			unmap_mapping_range(mapping, unmap_start,
2773*4882a593Smuzhiyun 					    1 + unmap_end - unmap_start, 0);
2774*4882a593Smuzhiyun 		shmem_truncate_range(inode, offset, offset + len - 1);
2775*4882a593Smuzhiyun 		/* No need to unmap again: hole-punching leaves COWed pages */
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun 		spin_lock(&inode->i_lock);
2778*4882a593Smuzhiyun 		inode->i_private = NULL;
2779*4882a593Smuzhiyun 		wake_up_all(&shmem_falloc_waitq);
2780*4882a593Smuzhiyun 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2781*4882a593Smuzhiyun 		spin_unlock(&inode->i_lock);
2782*4882a593Smuzhiyun 		error = 0;
2783*4882a593Smuzhiyun 		goto out;
2784*4882a593Smuzhiyun 	}
2785*4882a593Smuzhiyun 
2786*4882a593Smuzhiyun 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2787*4882a593Smuzhiyun 	error = inode_newsize_ok(inode, offset + len);
2788*4882a593Smuzhiyun 	if (error)
2789*4882a593Smuzhiyun 		goto out;
2790*4882a593Smuzhiyun 
2791*4882a593Smuzhiyun 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2792*4882a593Smuzhiyun 		error = -EPERM;
2793*4882a593Smuzhiyun 		goto out;
2794*4882a593Smuzhiyun 	}
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun 	start = offset >> PAGE_SHIFT;
2797*4882a593Smuzhiyun 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2798*4882a593Smuzhiyun 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2799*4882a593Smuzhiyun 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2800*4882a593Smuzhiyun 		error = -ENOSPC;
2801*4882a593Smuzhiyun 		goto out;
2802*4882a593Smuzhiyun 	}
2803*4882a593Smuzhiyun 
2804*4882a593Smuzhiyun 	shmem_falloc.waitq = NULL;
2805*4882a593Smuzhiyun 	shmem_falloc.start = start;
2806*4882a593Smuzhiyun 	shmem_falloc.next  = start;
2807*4882a593Smuzhiyun 	shmem_falloc.nr_falloced = 0;
2808*4882a593Smuzhiyun 	shmem_falloc.nr_unswapped = 0;
2809*4882a593Smuzhiyun 	spin_lock(&inode->i_lock);
2810*4882a593Smuzhiyun 	inode->i_private = &shmem_falloc;
2811*4882a593Smuzhiyun 	spin_unlock(&inode->i_lock);
2812*4882a593Smuzhiyun 
2813*4882a593Smuzhiyun 	for (index = start; index < end; index++) {
2814*4882a593Smuzhiyun 		struct page *page;
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 		/*
2817*4882a593Smuzhiyun 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2818*4882a593Smuzhiyun 		 * been interrupted because we are using up too much memory.
2819*4882a593Smuzhiyun 		 */
2820*4882a593Smuzhiyun 		if (signal_pending(current))
2821*4882a593Smuzhiyun 			error = -EINTR;
2822*4882a593Smuzhiyun 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2823*4882a593Smuzhiyun 			error = -ENOMEM;
2824*4882a593Smuzhiyun 		else
2825*4882a593Smuzhiyun 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2826*4882a593Smuzhiyun 		if (error) {
2827*4882a593Smuzhiyun 			/* Remove the !PageUptodate pages we added */
2828*4882a593Smuzhiyun 			if (index > start) {
2829*4882a593Smuzhiyun 				shmem_undo_range(inode,
2830*4882a593Smuzhiyun 				    (loff_t)start << PAGE_SHIFT,
2831*4882a593Smuzhiyun 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
2832*4882a593Smuzhiyun 			}
2833*4882a593Smuzhiyun 			goto undone;
2834*4882a593Smuzhiyun 		}
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 		/*
2837*4882a593Smuzhiyun 		 * Inform shmem_writepage() how far we have reached.
2838*4882a593Smuzhiyun 		 * No need for lock or barrier: we have the page lock.
2839*4882a593Smuzhiyun 		 */
2840*4882a593Smuzhiyun 		shmem_falloc.next++;
2841*4882a593Smuzhiyun 		if (!PageUptodate(page))
2842*4882a593Smuzhiyun 			shmem_falloc.nr_falloced++;
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 		/*
2845*4882a593Smuzhiyun 		 * If !PageUptodate, leave it that way so that freeable pages
2846*4882a593Smuzhiyun 		 * can be recognized if we need to rollback on error later.
2847*4882a593Smuzhiyun 		 * But set_page_dirty so that memory pressure will swap rather
2848*4882a593Smuzhiyun 		 * than free the pages we are allocating (and SGP_CACHE pages
2849*4882a593Smuzhiyun 		 * might still be clean: we now need to mark those dirty too).
2850*4882a593Smuzhiyun 		 */
2851*4882a593Smuzhiyun 		set_page_dirty(page);
2852*4882a593Smuzhiyun 		unlock_page(page);
2853*4882a593Smuzhiyun 		put_page(page);
2854*4882a593Smuzhiyun 		cond_resched();
2855*4882a593Smuzhiyun 	}
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2858*4882a593Smuzhiyun 		i_size_write(inode, offset + len);
2859*4882a593Smuzhiyun 	inode->i_ctime = current_time(inode);
2860*4882a593Smuzhiyun undone:
2861*4882a593Smuzhiyun 	spin_lock(&inode->i_lock);
2862*4882a593Smuzhiyun 	inode->i_private = NULL;
2863*4882a593Smuzhiyun 	spin_unlock(&inode->i_lock);
2864*4882a593Smuzhiyun out:
2865*4882a593Smuzhiyun 	inode_unlock(inode);
2866*4882a593Smuzhiyun 	return error;
2867*4882a593Smuzhiyun }
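
/*
 * Userspace sketch (illustrative): plain fallocate() preallocates and
 * accounts blocks up front, so later writes cannot fail with ENOSPC;
 * hole-punching unmaps and truncates the range so reads return zeroes:
 *
 *	fallocate(fd, 0, 0, 1 << 20);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);
 */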
2868*4882a593Smuzhiyun 
2869*4882a593Smuzhiyun static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2870*4882a593Smuzhiyun {
2871*4882a593Smuzhiyun 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun 	buf->f_type = TMPFS_MAGIC;
2874*4882a593Smuzhiyun 	buf->f_bsize = PAGE_SIZE;
2875*4882a593Smuzhiyun 	buf->f_namelen = NAME_MAX;
2876*4882a593Smuzhiyun 	if (sbinfo->max_blocks) {
2877*4882a593Smuzhiyun 		buf->f_blocks = sbinfo->max_blocks;
2878*4882a593Smuzhiyun 		buf->f_bavail =
2879*4882a593Smuzhiyun 		buf->f_bfree  = sbinfo->max_blocks -
2880*4882a593Smuzhiyun 				percpu_counter_sum(&sbinfo->used_blocks);
2881*4882a593Smuzhiyun 	}
2882*4882a593Smuzhiyun 	if (sbinfo->max_inodes) {
2883*4882a593Smuzhiyun 		buf->f_files = sbinfo->max_inodes;
2884*4882a593Smuzhiyun 		buf->f_ffree = sbinfo->free_inodes;
2885*4882a593Smuzhiyun 	}
2886*4882a593Smuzhiyun 	/* else leave those fields 0 like simple_statfs */
2887*4882a593Smuzhiyun 	return 0;
2888*4882a593Smuzhiyun }
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun /*
2891*4882a593Smuzhiyun  * File creation. Allocate an inode, and we're done.
2892*4882a593Smuzhiyun  */
2893*4882a593Smuzhiyun static int
2894*4882a593Smuzhiyun shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2895*4882a593Smuzhiyun {
2896*4882a593Smuzhiyun 	struct inode *inode;
2897*4882a593Smuzhiyun 	int error = -ENOSPC;
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2900*4882a593Smuzhiyun 	if (inode) {
2901*4882a593Smuzhiyun 		error = simple_acl_create(dir, inode);
2902*4882a593Smuzhiyun 		if (error)
2903*4882a593Smuzhiyun 			goto out_iput;
2904*4882a593Smuzhiyun 		error = security_inode_init_security(inode, dir,
2905*4882a593Smuzhiyun 						     &dentry->d_name,
2906*4882a593Smuzhiyun 						     shmem_initxattrs, NULL);
2907*4882a593Smuzhiyun 		if (error && error != -EOPNOTSUPP)
2908*4882a593Smuzhiyun 			goto out_iput;
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 		error = 0;
2911*4882a593Smuzhiyun 		dir->i_size += BOGO_DIRENT_SIZE;
2912*4882a593Smuzhiyun 		dir->i_ctime = dir->i_mtime = current_time(dir);
2913*4882a593Smuzhiyun 		d_instantiate(dentry, inode);
2914*4882a593Smuzhiyun 		dget(dentry); /* Extra count - pin the dentry in core */
2915*4882a593Smuzhiyun 	}
2916*4882a593Smuzhiyun 	return error;
2917*4882a593Smuzhiyun out_iput:
2918*4882a593Smuzhiyun 	iput(inode);
2919*4882a593Smuzhiyun 	return error;
2920*4882a593Smuzhiyun }
2921*4882a593Smuzhiyun 
2922*4882a593Smuzhiyun static int
2923*4882a593Smuzhiyun shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2924*4882a593Smuzhiyun {
2925*4882a593Smuzhiyun 	struct inode *inode;
2926*4882a593Smuzhiyun 	int error = -ENOSPC;
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2929*4882a593Smuzhiyun 	if (inode) {
2930*4882a593Smuzhiyun 		error = security_inode_init_security(inode, dir,
2931*4882a593Smuzhiyun 						     NULL,
2932*4882a593Smuzhiyun 						     shmem_initxattrs, NULL);
2933*4882a593Smuzhiyun 		if (error && error != -EOPNOTSUPP)
2934*4882a593Smuzhiyun 			goto out_iput;
2935*4882a593Smuzhiyun 		error = simple_acl_create(dir, inode);
2936*4882a593Smuzhiyun 		if (error)
2937*4882a593Smuzhiyun 			goto out_iput;
2938*4882a593Smuzhiyun 		d_tmpfile(dentry, inode);
2939*4882a593Smuzhiyun 	}
2940*4882a593Smuzhiyun 	return error;
2941*4882a593Smuzhiyun out_iput:
2942*4882a593Smuzhiyun 	iput(inode);
2943*4882a593Smuzhiyun 	return error;
2944*4882a593Smuzhiyun }
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2947*4882a593Smuzhiyun {
2948*4882a593Smuzhiyun 	int error;
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2951*4882a593Smuzhiyun 		return error;
2952*4882a593Smuzhiyun 	inc_nlink(dir);
2953*4882a593Smuzhiyun 	return 0;
2954*4882a593Smuzhiyun }
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2957*4882a593Smuzhiyun 		bool excl)
2958*4882a593Smuzhiyun {
2959*4882a593Smuzhiyun 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2960*4882a593Smuzhiyun }
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun /*
2963*4882a593Smuzhiyun  * Link a file.
2964*4882a593Smuzhiyun  */
2965*4882a593Smuzhiyun static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2966*4882a593Smuzhiyun {
2967*4882a593Smuzhiyun 	struct inode *inode = d_inode(old_dentry);
2968*4882a593Smuzhiyun 	int ret = 0;
2969*4882a593Smuzhiyun 
2970*4882a593Smuzhiyun 	/*
2971*4882a593Smuzhiyun 	 * No ordinary (disk based) filesystem counts links as inodes;
2972*4882a593Smuzhiyun 	 * but each new link needs a new dentry, pinning lowmem, and
2973*4882a593Smuzhiyun 	 * tmpfs dentries cannot be pruned until they are unlinked.
2974*4882a593Smuzhiyun 	 * But if an O_TMPFILE file is linked into the tmpfs, the
2975*4882a593Smuzhiyun 	 * first link must skip that, to get the accounting right.
2976*4882a593Smuzhiyun 	 */
2977*4882a593Smuzhiyun 	if (inode->i_nlink) {
2978*4882a593Smuzhiyun 		ret = shmem_reserve_inode(inode->i_sb, NULL);
2979*4882a593Smuzhiyun 		if (ret)
2980*4882a593Smuzhiyun 			goto out;
2981*4882a593Smuzhiyun 	}
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun 	dir->i_size += BOGO_DIRENT_SIZE;
2984*4882a593Smuzhiyun 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2985*4882a593Smuzhiyun 	inc_nlink(inode);
2986*4882a593Smuzhiyun 	ihold(inode);	/* New dentry reference */
2987*4882a593Smuzhiyun 	dget(dentry);		/* Extra pinning count for the created dentry */
2988*4882a593Smuzhiyun 	d_instantiate(dentry, inode);
2989*4882a593Smuzhiyun out:
2990*4882a593Smuzhiyun 	return ret;
2991*4882a593Smuzhiyun }
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2994*4882a593Smuzhiyun {
2995*4882a593Smuzhiyun 	struct inode *inode = d_inode(dentry);
2996*4882a593Smuzhiyun 
2997*4882a593Smuzhiyun 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2998*4882a593Smuzhiyun 		shmem_free_inode(inode->i_sb);
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	dir->i_size -= BOGO_DIRENT_SIZE;
3001*4882a593Smuzhiyun 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3002*4882a593Smuzhiyun 	drop_nlink(inode);
3003*4882a593Smuzhiyun 	dput(dentry);	/* Undo the count from "create" - this does all the work */
3004*4882a593Smuzhiyun 	return 0;
3005*4882a593Smuzhiyun }
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3008*4882a593Smuzhiyun {
3009*4882a593Smuzhiyun 	if (!simple_empty(dentry))
3010*4882a593Smuzhiyun 		return -ENOTEMPTY;
3011*4882a593Smuzhiyun 
3012*4882a593Smuzhiyun 	drop_nlink(d_inode(dentry));
3013*4882a593Smuzhiyun 	drop_nlink(dir);
3014*4882a593Smuzhiyun 	return shmem_unlink(dir, dentry);
3015*4882a593Smuzhiyun }
3016*4882a593Smuzhiyun 
3017*4882a593Smuzhiyun static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
3018*4882a593Smuzhiyun {
3019*4882a593Smuzhiyun 	bool old_is_dir = d_is_dir(old_dentry);
3020*4882a593Smuzhiyun 	bool new_is_dir = d_is_dir(new_dentry);
3021*4882a593Smuzhiyun 
3022*4882a593Smuzhiyun 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
3023*4882a593Smuzhiyun 		if (old_is_dir) {
3024*4882a593Smuzhiyun 			drop_nlink(old_dir);
3025*4882a593Smuzhiyun 			inc_nlink(new_dir);
3026*4882a593Smuzhiyun 		} else {
3027*4882a593Smuzhiyun 			drop_nlink(new_dir);
3028*4882a593Smuzhiyun 			inc_nlink(old_dir);
3029*4882a593Smuzhiyun 		}
3030*4882a593Smuzhiyun 	}
3031*4882a593Smuzhiyun 	old_dir->i_ctime = old_dir->i_mtime =
3032*4882a593Smuzhiyun 	new_dir->i_ctime = new_dir->i_mtime =
3033*4882a593Smuzhiyun 	d_inode(old_dentry)->i_ctime =
3034*4882a593Smuzhiyun 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
3035*4882a593Smuzhiyun 
3036*4882a593Smuzhiyun 	return 0;
3037*4882a593Smuzhiyun }
3038*4882a593Smuzhiyun 
3039*4882a593Smuzhiyun static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3040*4882a593Smuzhiyun {
3041*4882a593Smuzhiyun 	struct dentry *whiteout;
3042*4882a593Smuzhiyun 	int error;
3043*4882a593Smuzhiyun 
3044*4882a593Smuzhiyun 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3045*4882a593Smuzhiyun 	if (!whiteout)
3046*4882a593Smuzhiyun 		return -ENOMEM;
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun 	error = shmem_mknod(old_dir, whiteout,
3049*4882a593Smuzhiyun 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3050*4882a593Smuzhiyun 	dput(whiteout);
3051*4882a593Smuzhiyun 	if (error)
3052*4882a593Smuzhiyun 		return error;
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun 	/*
3055*4882a593Smuzhiyun 	 * Cheat and hash the whiteout while the old dentry is still in
3056*4882a593Smuzhiyun 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3057*4882a593Smuzhiyun 	 *
3058*4882a593Smuzhiyun 	 * d_lookup() will consistently find one of them at this point,
3059*4882a593Smuzhiyun 	 * not sure which one, but that isn't even important.
3060*4882a593Smuzhiyun 	 */
3061*4882a593Smuzhiyun 	d_rehash(whiteout);
3062*4882a593Smuzhiyun 	return 0;
3063*4882a593Smuzhiyun }
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun /*
3066*4882a593Smuzhiyun  * The VFS layer already does all the dentry stuff for rename;
3067*4882a593Smuzhiyun  * we just have to decrement the usage count for the target if
3068*4882a593Smuzhiyun  * it exists so that the VFS layer correctly frees it when it
3069*4882a593Smuzhiyun  * gets overwritten.
3070*4882a593Smuzhiyun  */
3071*4882a593Smuzhiyun static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3072*4882a593Smuzhiyun {
3073*4882a593Smuzhiyun 	struct inode *inode = d_inode(old_dentry);
3074*4882a593Smuzhiyun 	int they_are_dirs = S_ISDIR(inode->i_mode);
3075*4882a593Smuzhiyun 
3076*4882a593Smuzhiyun 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3077*4882a593Smuzhiyun 		return -EINVAL;
3078*4882a593Smuzhiyun 
3079*4882a593Smuzhiyun 	if (flags & RENAME_EXCHANGE)
3080*4882a593Smuzhiyun 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3081*4882a593Smuzhiyun 
3082*4882a593Smuzhiyun 	if (!simple_empty(new_dentry))
3083*4882a593Smuzhiyun 		return -ENOTEMPTY;
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun 	if (flags & RENAME_WHITEOUT) {
3086*4882a593Smuzhiyun 		int error;
3087*4882a593Smuzhiyun 
3088*4882a593Smuzhiyun 		error = shmem_whiteout(old_dir, old_dentry);
3089*4882a593Smuzhiyun 		if (error)
3090*4882a593Smuzhiyun 			return error;
3091*4882a593Smuzhiyun 	}
3092*4882a593Smuzhiyun 
3093*4882a593Smuzhiyun 	if (d_really_is_positive(new_dentry)) {
3094*4882a593Smuzhiyun 		(void) shmem_unlink(new_dir, new_dentry);
3095*4882a593Smuzhiyun 		if (they_are_dirs) {
3096*4882a593Smuzhiyun 			drop_nlink(d_inode(new_dentry));
3097*4882a593Smuzhiyun 			drop_nlink(old_dir);
3098*4882a593Smuzhiyun 		}
3099*4882a593Smuzhiyun 	} else if (they_are_dirs) {
3100*4882a593Smuzhiyun 		drop_nlink(old_dir);
3101*4882a593Smuzhiyun 		inc_nlink(new_dir);
3102*4882a593Smuzhiyun 	}
3103*4882a593Smuzhiyun 
3104*4882a593Smuzhiyun 	old_dir->i_size -= BOGO_DIRENT_SIZE;
3105*4882a593Smuzhiyun 	new_dir->i_size += BOGO_DIRENT_SIZE;
3106*4882a593Smuzhiyun 	old_dir->i_ctime = old_dir->i_mtime =
3107*4882a593Smuzhiyun 	new_dir->i_ctime = new_dir->i_mtime =
3108*4882a593Smuzhiyun 	inode->i_ctime = current_time(old_dir);
3109*4882a593Smuzhiyun 	return 0;
3110*4882a593Smuzhiyun }
3111*4882a593Smuzhiyun 
3112*4882a593Smuzhiyun static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3113*4882a593Smuzhiyun {
3114*4882a593Smuzhiyun 	int error;
3115*4882a593Smuzhiyun 	int len;
3116*4882a593Smuzhiyun 	struct inode *inode;
3117*4882a593Smuzhiyun 	struct page *page;
3118*4882a593Smuzhiyun 
3119*4882a593Smuzhiyun 	len = strlen(symname) + 1;
3120*4882a593Smuzhiyun 	if (len > PAGE_SIZE)
3121*4882a593Smuzhiyun 		return -ENAMETOOLONG;
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3124*4882a593Smuzhiyun 				VM_NORESERVE);
3125*4882a593Smuzhiyun 	if (!inode)
3126*4882a593Smuzhiyun 		return -ENOSPC;
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3129*4882a593Smuzhiyun 					     shmem_initxattrs, NULL);
3130*4882a593Smuzhiyun 	if (error && error != -EOPNOTSUPP) {
3131*4882a593Smuzhiyun 		iput(inode);
3132*4882a593Smuzhiyun 		return error;
3133*4882a593Smuzhiyun 	}
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 	inode->i_size = len-1;
3136*4882a593Smuzhiyun 	if (len <= SHORT_SYMLINK_LEN) {
3137*4882a593Smuzhiyun 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3138*4882a593Smuzhiyun 		if (!inode->i_link) {
3139*4882a593Smuzhiyun 			iput(inode);
3140*4882a593Smuzhiyun 			return -ENOMEM;
3141*4882a593Smuzhiyun 		}
3142*4882a593Smuzhiyun 		inode->i_op = &shmem_short_symlink_operations;
3143*4882a593Smuzhiyun 	} else {
3144*4882a593Smuzhiyun 		inode_nohighmem(inode);
3145*4882a593Smuzhiyun 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3146*4882a593Smuzhiyun 		if (error) {
3147*4882a593Smuzhiyun 			iput(inode);
3148*4882a593Smuzhiyun 			return error;
3149*4882a593Smuzhiyun 		}
3150*4882a593Smuzhiyun 		inode->i_mapping->a_ops = &shmem_aops;
3151*4882a593Smuzhiyun 		inode->i_op = &shmem_symlink_inode_operations;
3152*4882a593Smuzhiyun 		memcpy(page_address(page), symname, len);
3153*4882a593Smuzhiyun 		SetPageUptodate(page);
3154*4882a593Smuzhiyun 		set_page_dirty(page);
3155*4882a593Smuzhiyun 		unlock_page(page);
3156*4882a593Smuzhiyun 		put_page(page);
3157*4882a593Smuzhiyun 	}
3158*4882a593Smuzhiyun 	dir->i_size += BOGO_DIRENT_SIZE;
3159*4882a593Smuzhiyun 	dir->i_ctime = dir->i_mtime = current_time(dir);
3160*4882a593Smuzhiyun 	d_instantiate(dentry, inode);
3161*4882a593Smuzhiyun 	dget(dentry);
3162*4882a593Smuzhiyun 	return 0;
3163*4882a593Smuzhiyun }
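
/*
 * Design note (sketch): targets of up to SHORT_SYMLINK_LEN bytes are
 * kmemdup'd into inode->i_link and served by simple_get_link() without
 * touching the page cache; longer targets live in page 0 of the
 * inode's mapping and may be swapped out like any other shmem page.
 */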
3164*4882a593Smuzhiyun 
3165*4882a593Smuzhiyun static void shmem_put_link(void *arg)
3166*4882a593Smuzhiyun {
3167*4882a593Smuzhiyun 	mark_page_accessed(arg);
3168*4882a593Smuzhiyun 	put_page(arg);
3169*4882a593Smuzhiyun }
3170*4882a593Smuzhiyun 
3171*4882a593Smuzhiyun static const char *shmem_get_link(struct dentry *dentry,
3172*4882a593Smuzhiyun 				  struct inode *inode,
3173*4882a593Smuzhiyun 				  struct delayed_call *done)
3174*4882a593Smuzhiyun {
3175*4882a593Smuzhiyun 	struct page *page = NULL;
3176*4882a593Smuzhiyun 	int error;
3177*4882a593Smuzhiyun 	if (!dentry) {
3178*4882a593Smuzhiyun 		page = find_get_page(inode->i_mapping, 0);
3179*4882a593Smuzhiyun 		if (!page)
3180*4882a593Smuzhiyun 			return ERR_PTR(-ECHILD);
3181*4882a593Smuzhiyun 		if (!PageUptodate(page)) {
3182*4882a593Smuzhiyun 			put_page(page);
3183*4882a593Smuzhiyun 			return ERR_PTR(-ECHILD);
3184*4882a593Smuzhiyun 		}
3185*4882a593Smuzhiyun 	} else {
3186*4882a593Smuzhiyun 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3187*4882a593Smuzhiyun 		if (error)
3188*4882a593Smuzhiyun 			return ERR_PTR(error);
3189*4882a593Smuzhiyun 		unlock_page(page);
3190*4882a593Smuzhiyun 	}
3191*4882a593Smuzhiyun 	set_delayed_call(done, shmem_put_link, page);
3192*4882a593Smuzhiyun 	return page_address(page);
3193*4882a593Smuzhiyun }
3194*4882a593Smuzhiyun 
3195*4882a593Smuzhiyun #ifdef CONFIG_TMPFS_XATTR
3196*4882a593Smuzhiyun /*
3197*4882a593Smuzhiyun  * Superblocks without xattr inode operations may get some security.* xattr
3198*4882a593Smuzhiyun  * support from the LSM "for free". As soon as we have any other xattrs
3199*4882a593Smuzhiyun  * like ACLs, we also need to implement the security.* handlers at
3200*4882a593Smuzhiyun  * filesystem level, though.
3201*4882a593Smuzhiyun  */
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun /*
3204*4882a593Smuzhiyun  * Callback for security_inode_init_security() for acquiring xattrs.
3205*4882a593Smuzhiyun  */
3206*4882a593Smuzhiyun static int shmem_initxattrs(struct inode *inode,
3207*4882a593Smuzhiyun 			    const struct xattr *xattr_array,
3208*4882a593Smuzhiyun 			    void *fs_info)
3209*4882a593Smuzhiyun {
3210*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
3211*4882a593Smuzhiyun 	const struct xattr *xattr;
3212*4882a593Smuzhiyun 	struct simple_xattr *new_xattr;
3213*4882a593Smuzhiyun 	size_t len;
3214*4882a593Smuzhiyun 
3215*4882a593Smuzhiyun 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3216*4882a593Smuzhiyun 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3217*4882a593Smuzhiyun 		if (!new_xattr)
3218*4882a593Smuzhiyun 			return -ENOMEM;
3219*4882a593Smuzhiyun 
3220*4882a593Smuzhiyun 		len = strlen(xattr->name) + 1;
3221*4882a593Smuzhiyun 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3222*4882a593Smuzhiyun 					  GFP_KERNEL);
3223*4882a593Smuzhiyun 		if (!new_xattr->name) {
3224*4882a593Smuzhiyun 			kvfree(new_xattr);
3225*4882a593Smuzhiyun 			return -ENOMEM;
3226*4882a593Smuzhiyun 		}
3227*4882a593Smuzhiyun 
3228*4882a593Smuzhiyun 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3229*4882a593Smuzhiyun 		       XATTR_SECURITY_PREFIX_LEN);
3230*4882a593Smuzhiyun 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3231*4882a593Smuzhiyun 		       xattr->name, len);
3232*4882a593Smuzhiyun 
3233*4882a593Smuzhiyun 		simple_xattr_list_add(&info->xattrs, new_xattr);
3234*4882a593Smuzhiyun 	}
3235*4882a593Smuzhiyun 
3236*4882a593Smuzhiyun 	return 0;
3237*4882a593Smuzhiyun }
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3240*4882a593Smuzhiyun 				   struct dentry *unused, struct inode *inode,
3241*4882a593Smuzhiyun 				   const char *name, void *buffer, size_t size,
3242*4882a593Smuzhiyun 				   int flags)
3243*4882a593Smuzhiyun {
3244*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
3245*4882a593Smuzhiyun 
3246*4882a593Smuzhiyun 	name = xattr_full_name(handler, name);
3247*4882a593Smuzhiyun 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3248*4882a593Smuzhiyun }
3249*4882a593Smuzhiyun 
3250*4882a593Smuzhiyun static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3251*4882a593Smuzhiyun 				   struct dentry *unused, struct inode *inode,
3252*4882a593Smuzhiyun 				   const char *name, const void *value,
3253*4882a593Smuzhiyun 				   size_t size, int flags)
3254*4882a593Smuzhiyun {
3255*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(inode);
3256*4882a593Smuzhiyun 
3257*4882a593Smuzhiyun 	name = xattr_full_name(handler, name);
3258*4882a593Smuzhiyun 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3259*4882a593Smuzhiyun }
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun static const struct xattr_handler shmem_security_xattr_handler = {
3262*4882a593Smuzhiyun 	.prefix = XATTR_SECURITY_PREFIX,
3263*4882a593Smuzhiyun 	.get = shmem_xattr_handler_get,
3264*4882a593Smuzhiyun 	.set = shmem_xattr_handler_set,
3265*4882a593Smuzhiyun };
3266*4882a593Smuzhiyun 
3267*4882a593Smuzhiyun static const struct xattr_handler shmem_trusted_xattr_handler = {
3268*4882a593Smuzhiyun 	.prefix = XATTR_TRUSTED_PREFIX,
3269*4882a593Smuzhiyun 	.get = shmem_xattr_handler_get,
3270*4882a593Smuzhiyun 	.set = shmem_xattr_handler_set,
3271*4882a593Smuzhiyun };
3272*4882a593Smuzhiyun 
3273*4882a593Smuzhiyun static const struct xattr_handler *shmem_xattr_handlers[] = {
3274*4882a593Smuzhiyun #ifdef CONFIG_TMPFS_POSIX_ACL
3275*4882a593Smuzhiyun 	&posix_acl_access_xattr_handler,
3276*4882a593Smuzhiyun 	&posix_acl_default_xattr_handler,
3277*4882a593Smuzhiyun #endif
3278*4882a593Smuzhiyun 	&shmem_security_xattr_handler,
3279*4882a593Smuzhiyun 	&shmem_trusted_xattr_handler,
3280*4882a593Smuzhiyun 	NULL
3281*4882a593Smuzhiyun };
3282*4882a593Smuzhiyun 
3283*4882a593Smuzhiyun static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3284*4882a593Smuzhiyun {
3285*4882a593Smuzhiyun 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3286*4882a593Smuzhiyun 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3287*4882a593Smuzhiyun }
3288*4882a593Smuzhiyun #endif /* CONFIG_TMPFS_XATTR */
3289*4882a593Smuzhiyun 
3290*4882a593Smuzhiyun static const struct inode_operations shmem_short_symlink_operations = {
3291*4882a593Smuzhiyun 	.get_link	= simple_get_link,
3292*4882a593Smuzhiyun #ifdef CONFIG_TMPFS_XATTR
3293*4882a593Smuzhiyun 	.listxattr	= shmem_listxattr,
3294*4882a593Smuzhiyun #endif
3295*4882a593Smuzhiyun };
3296*4882a593Smuzhiyun 
3297*4882a593Smuzhiyun static const struct inode_operations shmem_symlink_inode_operations = {
3298*4882a593Smuzhiyun 	.get_link	= shmem_get_link,
3299*4882a593Smuzhiyun #ifdef CONFIG_TMPFS_XATTR
3300*4882a593Smuzhiyun 	.listxattr	= shmem_listxattr,
3301*4882a593Smuzhiyun #endif
3302*4882a593Smuzhiyun };
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun static struct dentry *shmem_get_parent(struct dentry *child)
3305*4882a593Smuzhiyun {
3306*4882a593Smuzhiyun 	return ERR_PTR(-ESTALE);
3307*4882a593Smuzhiyun }
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun static int shmem_match(struct inode *ino, void *vfh)
3310*4882a593Smuzhiyun {
3311*4882a593Smuzhiyun 	__u32 *fh = vfh;
3312*4882a593Smuzhiyun 	__u64 inum = fh[2];
3313*4882a593Smuzhiyun 	inum = (inum << 32) | fh[1];
3314*4882a593Smuzhiyun 	return ino->i_ino == inum && fh[0] == ino->i_generation;
3315*4882a593Smuzhiyun }
3316*4882a593Smuzhiyun 
3317*4882a593Smuzhiyun /* Find any alias of inode, but prefer a hashed alias */
3318*4882a593Smuzhiyun static struct dentry *shmem_find_alias(struct inode *inode)
3319*4882a593Smuzhiyun {
3320*4882a593Smuzhiyun 	struct dentry *alias = d_find_alias(inode);
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 	return alias ?: d_find_any_alias(inode);
3323*4882a593Smuzhiyun }
3324*4882a593Smuzhiyun 
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3327*4882a593Smuzhiyun 		struct fid *fid, int fh_len, int fh_type)
3328*4882a593Smuzhiyun {
3329*4882a593Smuzhiyun 	struct inode *inode;
3330*4882a593Smuzhiyun 	struct dentry *dentry = NULL;
3331*4882a593Smuzhiyun 	u64 inum;
3332*4882a593Smuzhiyun 
3333*4882a593Smuzhiyun 	if (fh_len < 3)
3334*4882a593Smuzhiyun 		return NULL;
3335*4882a593Smuzhiyun 
3336*4882a593Smuzhiyun 	inum = fid->raw[2];
3337*4882a593Smuzhiyun 	inum = (inum << 32) | fid->raw[1];
3338*4882a593Smuzhiyun 
3339*4882a593Smuzhiyun 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3340*4882a593Smuzhiyun 			shmem_match, fid->raw);
3341*4882a593Smuzhiyun 	if (inode) {
3342*4882a593Smuzhiyun 		dentry = shmem_find_alias(inode);
3343*4882a593Smuzhiyun 		iput(inode);
3344*4882a593Smuzhiyun 	}
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	return dentry;
3347*4882a593Smuzhiyun }
3348*4882a593Smuzhiyun 
3349*4882a593Smuzhiyun static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3350*4882a593Smuzhiyun 				struct inode *parent)
3351*4882a593Smuzhiyun {
3352*4882a593Smuzhiyun 	if (*len < 3) {
3353*4882a593Smuzhiyun 		*len = 3;
3354*4882a593Smuzhiyun 		return FILEID_INVALID;
3355*4882a593Smuzhiyun 	}
3356*4882a593Smuzhiyun 
3357*4882a593Smuzhiyun 	if (inode_unhashed(inode)) {
3358*4882a593Smuzhiyun 		/* Unfortunately insert_inode_hash is not idempotent,
3359*4882a593Smuzhiyun 		 * so as we hash inodes here rather than at creation
3360*4882a593Smuzhiyun 		 * time, we need a lock to ensure we only try
3361*4882a593Smuzhiyun 		 * to do it once
3362*4882a593Smuzhiyun 		 */
3363*4882a593Smuzhiyun 		static DEFINE_SPINLOCK(lock);
3364*4882a593Smuzhiyun 		spin_lock(&lock);
3365*4882a593Smuzhiyun 		if (inode_unhashed(inode))
3366*4882a593Smuzhiyun 			__insert_inode_hash(inode,
3367*4882a593Smuzhiyun 					    inode->i_ino + inode->i_generation);
3368*4882a593Smuzhiyun 		spin_unlock(&lock);
3369*4882a593Smuzhiyun 	}
3370*4882a593Smuzhiyun 
3371*4882a593Smuzhiyun 	fh[0] = inode->i_generation;
3372*4882a593Smuzhiyun 	fh[1] = inode->i_ino;
3373*4882a593Smuzhiyun 	fh[2] = ((__u64)inode->i_ino) >> 32;
3374*4882a593Smuzhiyun 
3375*4882a593Smuzhiyun 	*len = 3;
3376*4882a593Smuzhiyun 	return 1;
3377*4882a593Smuzhiyun }
3378*4882a593Smuzhiyun 
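/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the three-word file handle built above packs the inode generation in
 * fh[0] and the 64-bit inode number split across fh[1] (low 32 bits)
 * and fh[2] (high 32 bits).  A decoder, such as shmem_match() earlier
 * in this file, reassembles and checks it like this:
 *
 *	__u64 inum = ((__u64)fh[2] << 32) | fh[1];
 *	bool same = (inode->i_ino == inum) &&
 *		    (inode->i_generation == fh[0]);
 *
 * The generation check guards against a recycled inode number being
 * mistaken for the original file.
 */
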
static const struct export_operations shmem_export_ops = {
	.get_parent     = shmem_get_parent,
	.encode_fh      = shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

enum shmem_param {
	Opt_gid,
	Opt_huge,
	Opt_mode,
	Opt_mpol,
	Opt_nr_blocks,
	Opt_nr_inodes,
	Opt_size,
	Opt_uid,
	Opt_inode32,
	Opt_inode64,
};

static const struct constant_table shmem_param_enums_huge[] = {
	{"never",	SHMEM_HUGE_NEVER },
	{"always",	SHMEM_HUGE_ALWAYS },
	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
	{"advise",	SHMEM_HUGE_ADVISE },
	{}
};

const struct fs_parameter_spec shmem_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("mpol",		Opt_mpol),
	fsparam_string("nr_blocks",	Opt_nr_blocks),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	fsparam_flag  ("inode32",	Opt_inode32),
	fsparam_flag  ("inode64",	Opt_inode64),
	{}
};

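/*
 * Editor's note (illustrative sketch, not part of the original file):
 * these parameters can arrive either as a classic comma-separated
 * mount string or one at a time through the new mount API.  A minimal
 * user-space sketch, assuming a kernel providing fsopen(2)/fsconfig(2)
 * (no glibc wrappers, hence raw syscall numbers):
 *
 *	#include <sys/syscall.h>
 *	#include <linux/mount.h>
 *	#include <unistd.h>
 *
 *	int fsfd = syscall(SYS_fsopen, "tmpfs", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "1G", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "huge", "within_size", 0);
 *	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *
 * Each FSCONFIG_SET_STRING call above lands in shmem_parse_one() below.
 */
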
static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
{
	struct shmem_options *ctx = fc->fs_private;
	struct fs_parse_result result;
	unsigned long long size;
	char *rest;
	int opt;

	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_size:
		size = memparse(param->string, &rest);
		if (*rest == '%') {
			size <<= PAGE_SHIFT;
			size *= totalram_pages();
			do_div(size, 100);
			rest++;
		}
		if (*rest)
			goto bad_value;
		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_blocks:
		ctx->blocks = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_inodes:
		ctx->inodes = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_INODES;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & 07777;
		break;
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_value;
		break;
	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_value;
		break;
	case Opt_huge:
		ctx->huge = result.uint_32;
		if (ctx->huge != SHMEM_HUGE_NEVER &&
		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		      has_transparent_hugepage()))
			goto unsupported_parameter;
		ctx->seen |= SHMEM_SEEN_HUGE;
		break;
	case Opt_mpol:
		if (IS_ENABLED(CONFIG_NUMA)) {
			mpol_put(ctx->mpol);
			ctx->mpol = NULL;
			if (mpol_parse_str(param->string, &ctx->mpol))
				goto bad_value;
			break;
		}
		goto unsupported_parameter;
	case Opt_inode32:
		ctx->full_inums = false;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	case Opt_inode64:
		if (sizeof(ino_t) < 8) {
			return invalfc(fc,
				       "Cannot use inode64 with <64bit inums in kernel\n");
		}
		ctx->full_inums = true;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	}
	return 0;

unsupported_parameter:
	return invalfc(fc, "Unsupported parameter '%s'", param->key);
bad_value:
	return invalfc(fc, "Bad value for '%s'", param->key);
}

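/*
 * Editor's note (worked example, not part of the original file):
 * for "size=50%" on a machine with 4 GiB of RAM and 4 KiB pages,
 * memparse() yields size = 50, and the Opt_size branch computes:
 *
 *	size <<= PAGE_SHIFT;		// 50 * 4096       = 204800
 *	size *= totalram_pages();	// * 1048576 pages = 214748364800
 *	do_div(size, 100);		// / 100           = 2147483648 bytes
 *
 * so ctx->blocks ends up as 2 GiB / PAGE_SIZE = 524288 pages, i.e.
 * half of RAM, as expected.
 */
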
static int shmem_parse_options(struct fs_context *fc, void *data)
{
	char *options = data;

	if (options) {
		int err = security_sb_eat_lsm_opts(options, &fc->security);
		if (err)
			return err;
	}

	while (options != NULL) {
		char *this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char) {
			char *value = strchr(this_char, '=');
			size_t len = 0;
			int err;

			if (value) {
				*value++ = '\0';
				len = strlen(value);
			}
			err = vfs_parse_fs_string(fc, this_char, value, len);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

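/*
 * Editor's note (worked example, not part of the original file):
 * the isdigit() check above is what keeps NUMA nodelists intact.
 * Given
 *
 *	"size=10M,mpol=bind:0,2,nr_inodes=1024"
 *
 * the comma before '2' is followed by a digit, so it is taken to be
 * part of the mpol nodelist rather than an option separator, and the
 * string tokenizes as "size=10M", "mpol=bind:0,2", "nr_inodes=1024".
 */
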
/*
 * Reconfigure a shmem filesystem.
 *
 * Note that we disallow change from limited->unlimited blocks/inodes while any
 * are in use; but we must separately disallow unlimited->limited, because in
 * that case we have no record of how much is already in use.
 */
static int shmem_reconfigure(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
	unsigned long inodes;
	const char *err;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
		if (!sbinfo->max_blocks) {
			err = "Cannot retroactively limit size";
			goto out;
		}
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   ctx->blocks) > 0) {
			err = "Too small a size for current use";
			goto out;
		}
	}
	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
		if (!sbinfo->max_inodes) {
			err = "Cannot retroactively limit inodes";
			goto out;
		}
		if (ctx->inodes < inodes) {
			err = "Too few inodes for current use";
			goto out;
		}
	}

	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
	    sbinfo->next_ino > UINT_MAX) {
		err = "Current inum too high to switch to 32-bit inums";
		goto out;
	}

	if (ctx->seen & SHMEM_SEEN_HUGE)
		sbinfo->huge = ctx->huge;
	if (ctx->seen & SHMEM_SEEN_INUMS)
		sbinfo->full_inums = ctx->full_inums;
	if (ctx->seen & SHMEM_SEEN_BLOCKS)
		sbinfo->max_blocks  = ctx->blocks;
	if (ctx->seen & SHMEM_SEEN_INODES) {
		sbinfo->max_inodes  = ctx->inodes;
		sbinfo->free_inodes = ctx->inodes - inodes;
	}

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (ctx->mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
		ctx->mpol = NULL;
	}
	spin_unlock(&sbinfo->stat_lock);
	return 0;
out:
	spin_unlock(&sbinfo->stat_lock);
	return invalfc(fc, "%s", err);
}

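/*
 * Editor's note (illustrative sketch, not part of the original file):
 * this path is reached by a remount, e.g.
 *
 *	mount -o remount,size=2G /dev/shm
 *
 * Growing a limited instance succeeds, shrinking below current usage
 * fails with "Too small a size for current use", and an instance
 * created without limits cannot retroactively be given one, per the
 * checks above.
 */
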
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (0777 | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));

	/*
	 * Showing inode{64,32} might be useful even if it's the system default,
	 * since then people don't have to resort to checking both here and
	 * /proc/config.gz to confirm 64-bit inums were successfully applied
	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
	 *
	 * We hide it when inode64 isn't the default and we are using 32-bit
	 * inodes, since that probably just means the feature isn't even under
	 * consideration.
	 *
	 * As such:
	 *
	 *                     +-----------------+-----------------+
	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
	 *  +------------------+-----------------+-----------------+
	 *  | full_inums=true  | show            | show            |
	 *  | full_inums=false | show            | hide            |
	 *  +------------------+-----------------+-----------------+
	 *
	 */
	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
	if (sbinfo->huge)
		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}

#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	free_percpu(sbinfo->ino_batch);
	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
			ctx->blocks = shmem_default_max_blocks();
		if (!(ctx->seen & SHMEM_SEEN_INODES))
			ctx->inodes = shmem_default_max_inodes();
		if (!(ctx->seen & SHMEM_SEEN_INUMS))
			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC;
#else
	sb->s_flags |= SB_NOUSER;
#endif
	sbinfo->max_blocks = ctx->blocks;
	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
	if (sb->s_flags & SB_KERNMOUNT) {
		sbinfo->ino_batch = alloc_percpu(ino_t);
		if (!sbinfo->ino_batch)
			goto failed;
	}
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->full_inums = ctx->full_inums;
	sbinfo->mode = ctx->mode;
	sbinfo->huge = ctx->huge;
	sbinfo->mpol = ctx->mpol;
	ctx->mpol = NULL;

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	uuid_gen(&sb->s_uuid);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

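/*
 * Editor's note (illustrative sketch, not part of the original file):
 * per the comment in shmem_fill_super() above, the defaults for a user
 * mount are the classic tmpfs limits: shmem_default_max_blocks() is
 * conventionally half of RAM, i.e.
 *
 *	totalram_pages() / 2		// equivalent to "size=50%"
 *
 * and shmem_default_max_inodes() allows roughly one inode per page of
 * lowmem.  An option-less "mount -t tmpfs tmpfs /mnt" therefore gets
 * half of RAM and a proportional inode limit, while kern_mount()ed
 * instances stay unlimited.
 */
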
static int shmem_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;

	if (ctx) {
		mpol_put(ctx->mpol);
		kfree(ctx);
	}
}

static const struct fs_context_operations shmem_fs_context_ops = {
	.free			= shmem_free_fc,
	.get_tree		= shmem_get_tree,
#ifdef CONFIG_TMPFS
	.parse_monolithic	= shmem_parse_options,
	.parse_param		= shmem_parse_one,
	.reconfigure		= shmem_reconfigure,
#endif
};

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_free_in_core_inode(struct inode *inode)
{
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	.allow_speculation = filemap_allow_speculation,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
	struct shmem_options *ctx;

	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mode = 0777 | S_ISVTX;
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();

	fc->fs_private = ctx;
	fc->ops = &shmem_fs_context_ops;
	return 0;
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
	.parameters	= shmem_fs_parameters,
#endif
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT | FS_THP_SUPPORT,
};

int __init shmem_init(void)
{
	int error;

	shmem_init_inodecache();

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = 0; /* just in case it was patched */
#endif
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
	return error;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	static const int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int i, count;

	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";

		count += sprintf(buf + count, fmt,
				shmem_format_huge(values[i]));
	}
	buf[count - 1] = '\n';
	return count;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */

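/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the attribute above is exposed as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, so from user
 * space the global policy can be inspected and changed, e.g.
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	always within_size advise [never] deny force
 *	$ echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * "deny" and "force" override every mount's huge= option, per the
 * shmem_huge checks in shmem_huge_enabled() below.
 */
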
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
	pgoff_t off;

	if (!transhuge_vma_enabled(vma, vma->vm_flags))
		return false;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		return false;
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
		    i_size >> PAGE_SHIFT >= off)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		/* TODO: implement fadvise() hints */
		return (vma->vm_flags & VM_HUGEPAGE);
	default:
		VM_BUG_ON(1);
		return false;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(unsigned int type, bool frontswap,
		unsigned long *fs_pages_to_unuse)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
				flags);
	if (unlikely(!inode)) {
		shmem_unacct_size(flags, size);
		return ERR_PTR(-ENOSPC);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
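
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a minimal in-kernel user of shmem_file_setup(), assuming a caller
 * that wants a 1 MiB unlinked tmpfs-backed buffer it can access
 * through the page cache:
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("my-buffer", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	// ... use filp->f_mapping, or install filp in an fd table ...
 *	fput(filp);	// drops the last reference; pages are freed
 *
 * The name only shows up in /proc/<pid>/maps; no path is created, and
 * the file disappears when the final reference is dropped.
 */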

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}

	return 0;
}

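/*
 * Editor's note (illustrative sketch, not part of the original file):
 * shmem_zero_setup() is what backs a user-space shared anonymous
 * mapping, e.g.
 *
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * do_mmap() prepares the vma and this function swaps in an unlinked
 * tmpfs file (shown as "/dev/zero (deleted)" in /proc/<pid>/maps) so
 * that the pages can be shared across fork() and swapped out.
 */
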
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
				  gfp, NULL, NULL, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);

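/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a GPU-driver-style caller, following the i915 pattern described in
 * the kernel-doc above, might pull pages out of a shmem-backed object
 * like this:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... pin or map the page ...
 *	put_page(page);	// release the reference taken by the lookup
 *
 * The page comes back unlocked with an elevated refcount, as the
 * unlock_page() in the function above shows.
 */
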
void shmem_mark_page_lazyfree(struct page *page, bool tail)
{
	mark_page_lazyfree_movetail(page, tail);
}
EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);

int reclaim_shmem_address_space(struct address_space *mapping)
{
#ifdef CONFIG_SHMEM
	pgoff_t start = 0;
	struct page *page;
	LIST_HEAD(page_list);
	XA_STATE(xas, &mapping->i_pages, start);

	if (!shmem_mapping(mapping))
		return -EINVAL;

	lru_add_drain();

	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			continue;
		if (isolate_lru_page(page))
			continue;

		list_add(&page->lru, &page_list);

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return reclaim_pages(&page_list);
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(reclaim_shmem_address_space);
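
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a caller holding a reference to a shmem file could proactively push
 * that file's resident pages toward reclaim with:
 *
 *	int nr = reclaim_shmem_address_space(file->f_mapping);
 *
 * where nr is the number of pages reclaim_pages() managed to free;
 * -EINVAL comes back if the mapping is not shmem-backed.
 */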