1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun FUSE: Filesystem in Userspace
3*4882a593Smuzhiyun Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun This program can be distributed under the terms of the GNU GPL.
6*4882a593Smuzhiyun See the file COPYING.
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include "fuse_i.h"
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/pagemap.h>
12*4882a593Smuzhiyun #include <linux/file.h>
13*4882a593Smuzhiyun #include <linux/fs_context.h>
14*4882a593Smuzhiyun #include <linux/sched.h>
15*4882a593Smuzhiyun #include <linux/namei.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/xattr.h>
18*4882a593Smuzhiyun #include <linux/iversion.h>
19*4882a593Smuzhiyun #include <linux/posix_acl.h>
20*4882a593Smuzhiyun
fuse_advise_use_readdirplus(struct inode * dir)21*4882a593Smuzhiyun static void fuse_advise_use_readdirplus(struct inode *dir)
22*4882a593Smuzhiyun {
23*4882a593Smuzhiyun struct fuse_inode *fi = get_fuse_inode(dir);
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
26*4882a593Smuzhiyun }
27*4882a593Smuzhiyun
#if BITS_PER_LONG >= 64
/*
 * With 64-bit longs the 64-bit jiffies timeout fits directly in the
 * dentry's d_fsdata pointer; no allocation is needed.
 */
static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
{
	entry->d_fsdata = (void *) time;
}

static inline u64 fuse_dentry_time(const struct dentry *entry)
{
	return (u64)entry->d_fsdata;
}

#else
/*
 * On 32-bit archs the 64-bit timeout does not fit in d_fsdata, so a
 * small per-dentry object is allocated (see fuse_dentry_init()) and
 * freed RCU-delayed through the rcu head below
 * (see fuse_dentry_release()).
 */
union fuse_dentry {
	u64 time;
	struct rcu_head rcu;
};

static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
{
	((union fuse_dentry *) dentry->d_fsdata)->time = time;
}

static inline u64 fuse_dentry_time(const struct dentry *entry)
{
	return ((union fuse_dentry *) entry->d_fsdata)->time;
}
#endif
55*4882a593Smuzhiyun
/* Store the entry timeout and keep DCACHE_OP_DELETE in sync with it */
static void fuse_dentry_settime(struct dentry *dentry, u64 time)
{
	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
	bool delete = !time && fc->delete_stale;
	bool has_op_delete = dentry->d_flags & DCACHE_OP_DELETE;

	/*
	 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
	 * Don't care about races, either way it's just an optimization
	 */
	if (delete != has_op_delete) {
		spin_lock(&dentry->d_lock);
		if (delete)
			dentry->d_flags |= DCACHE_OP_DELETE;
		else
			dentry->d_flags &= ~DCACHE_OP_DELETE;
		spin_unlock(&dentry->d_lock);
	}

	__fuse_dentry_settime(dentry, time);
}
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun /*
78*4882a593Smuzhiyun * FUSE caches dentries and attributes with separate timeout. The
79*4882a593Smuzhiyun * time in jiffies until the dentry/attributes are valid is stored in
80*4882a593Smuzhiyun * dentry->d_fsdata and fuse_inode->i_time respectively.
81*4882a593Smuzhiyun */
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /*
84*4882a593Smuzhiyun * Calculate the time in jiffies until a dentry/attributes are valid
85*4882a593Smuzhiyun */
time_to_jiffies(u64 sec,u32 nsec)86*4882a593Smuzhiyun static u64 time_to_jiffies(u64 sec, u32 nsec)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun if (sec || nsec) {
89*4882a593Smuzhiyun struct timespec64 ts = {
90*4882a593Smuzhiyun sec,
91*4882a593Smuzhiyun min_t(u32, nsec, NSEC_PER_SEC - 1)
92*4882a593Smuzhiyun };
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun return get_jiffies_64() + timespec64_to_jiffies(&ts);
95*4882a593Smuzhiyun } else
96*4882a593Smuzhiyun return 0;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun /*
100*4882a593Smuzhiyun * Set dentry and possibly attribute timeouts from the lookup/mk*
101*4882a593Smuzhiyun * replies
102*4882a593Smuzhiyun */
fuse_change_entry_timeout(struct dentry * entry,struct fuse_entry_out * o)103*4882a593Smuzhiyun void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun fuse_dentry_settime(entry,
106*4882a593Smuzhiyun time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun
/* Attribute cache deadline (jiffies) from a GETATTR/SETATTR reply */
static u64 attr_timeout(struct fuse_attr_out *o)
{
	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}
113*4882a593Smuzhiyun
/* Attribute cache deadline (jiffies) from a LOOKUP/create reply */
u64 entry_attr_timeout(struct fuse_entry_out *o)
{
	return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
}
118*4882a593Smuzhiyun
/* Atomically add @mask to the inode's set of invalidated attributes */
static void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	set_mask_bits(&fi->inval_mask, 0, mask);
}
123*4882a593Smuzhiyun
/*
 * Mark all basic-stat attributes as stale, so that at the next call to
 * ->getattr() they will be fetched from userspace
 */
void fuse_invalidate_attr(struct inode *inode)
{
	fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
}
132*4882a593Smuzhiyun
/*
 * Directory contents changed (create/unlink/rename/...): drop cached
 * attributes and advance the directory's i_version.
 */
static void fuse_dir_changed(struct inode *dir)
{
	fuse_invalidate_attr(dir);
	inode_maybe_inc_iversion(dir, false);
}
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun /**
140*4882a593Smuzhiyun * Mark the attributes as stale due to an atime change. Avoid the invalidate if
141*4882a593Smuzhiyun * atime is not used.
142*4882a593Smuzhiyun */
fuse_invalidate_atime(struct inode * inode)143*4882a593Smuzhiyun void fuse_invalidate_atime(struct inode *inode)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun if (!IS_RDONLY(inode))
146*4882a593Smuzhiyun fuse_invalidate_attr_mask(inode, STATX_ATIME);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun
/*
 * Just mark the entry as stale, so that a next attempt to look it up
 * will result in a new lookup call to userspace
 *
 * This is called when a dentry is about to become negative and the
 * timeout is unknown (unlink, rmdir, rename and in some cases
 * lookup)
 */
void fuse_invalidate_entry_cache(struct dentry *entry)
{
	/* Timeout 0 == already expired */
	fuse_dentry_settime(entry, 0);
}
161*4882a593Smuzhiyun
/*
 * Same as fuse_invalidate_entry_cache(), but also try to remove the
 * dentry from the hash
 */
static void fuse_invalidate_entry(struct dentry *entry)
{
	d_invalidate(entry);
	fuse_invalidate_entry_cache(entry);
}
171*4882a593Smuzhiyun
/*
 * Fill in @args for a FUSE_LOOKUP of @name under @nodeid; the reply is
 * delivered into @outarg (zeroed here first).
 */
static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
			     u64 nodeid, const struct qstr *name,
			     struct fuse_entry_out *outarg)
{
	memset(outarg, 0, sizeof(*outarg));
	args->opcode = FUSE_LOOKUP;
	args->nodeid = nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = name->len + 1;	/* include the NUL */
	args->in_args[0].value = name->name;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(*outarg);
	args->out_args[0].value = outarg;
}
186*4882a593Smuzhiyun
/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup. If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more. If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 */
static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
{
	struct inode *inode;
	struct dentry *parent;
	struct fuse_mount *fm;
	struct fuse_inode *fi;
	int ret;

	inode = d_inode_rcu(entry);
	if (inode && fuse_is_bad(inode))
		goto invalid;
	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
		 (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) {
		struct fuse_entry_out outarg;
		FUSE_ARGS(args);
		struct fuse_forget_link *forget;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			goto invalid;

		/* Redoing the lookup sleeps: cannot do it in RCU-walk mode */
		ret = -ECHILD;
		if (flags & LOOKUP_RCU)
			goto out;

		fm = get_fuse_mount(inode);

		forget = fuse_alloc_forget();
		ret = -ENOMEM;
		if (!forget)
			goto out;

		/* Sampled before the request so attribute updates can race safely */
		attr_version = fuse_get_attr_version(fm->fc);

		parent = dget_parent(entry);
		fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)),
				 &entry->d_name, &outarg);
		ret = fuse_simple_request(fm, &args);
		dput(parent);
		/* Zero nodeid is same as -ENOENT */
		if (!ret && !outarg.nodeid)
			ret = -ENOENT;
		if (!ret) {
			fi = get_fuse_inode(inode);
			/*
			 * The lookup must resolve to the same inode and its
			 * submount-ness must not have changed; otherwise drop
			 * the just-acquired lookup count via a FORGET and
			 * invalidate the dentry.
			 */
			if (outarg.nodeid != get_node_id(inode) ||
			    (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
				fuse_queue_forget(fm->fc, forget,
						  outarg.nodeid, 1);
				goto invalid;
			}
			/* Successful lookup: account the extra nlookup reference */
			spin_lock(&fi->lock);
			fi->nlookup++;
			spin_unlock(&fi->lock);
		}
		kfree(forget);
		if (ret == -ENOMEM)
			goto out;
		if (ret || fuse_invalid_attr(&outarg.attr) ||
		    fuse_stale_inode(inode, outarg.generation, &outarg.attr))
			goto invalid;

		/* Same inode: refresh ACLs, attributes and both timeouts */
		forget_all_cached_acls(inode);
		fuse_change_attributes(inode, &outarg.attr,
				       entry_attr_timeout(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	} else if (inode) {
		/* Timeout still valid: only handle readdirplus advice */
		fi = get_fuse_inode(inode);
		if (flags & LOOKUP_RCU) {
			if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
				return -ECHILD;
		} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
			parent = dget_parent(entry);
			fuse_advise_use_readdirplus(d_inode(parent));
			dput(parent);
		}
	}
	ret = 1;
out:
	return ret;

invalid:
	ret = 0;
	goto out;
}
282*4882a593Smuzhiyun
#if BITS_PER_LONG < 64
/* Allocate the out-of-line timeout storage (see union fuse_dentry) */
static int fuse_dentry_init(struct dentry *dentry)
{
	dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
				   GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);

	return dentry->d_fsdata ? 0 : -ENOMEM;
}
/* Free RCU-delayed: lockless walkers may still be reading the timeout */
static void fuse_dentry_release(struct dentry *dentry)
{
	union fuse_dentry *fd = dentry->d_fsdata;

	kfree_rcu(fd, rcu);
}
#endif
298*4882a593Smuzhiyun
/*
 * Tell dput() to discard (rather than cache) a dentry whose validity
 * timeout has already expired.  Only installed while it can help --
 * see the DCACHE_OP_DELETE juggling in fuse_dentry_settime().
 */
static int fuse_dentry_delete(const struct dentry *dentry)
{
	return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
}
303*4882a593Smuzhiyun
/*
 * Create a fuse_mount object with a new superblock (with path->dentry
 * as the root), and return that mount so it can be auto-mounted on
 * @path.
 */
static struct vfsmount *fuse_dentry_automount(struct path *path)
{
	struct fs_context *fsc;
	struct fuse_mount *parent_fm = get_fuse_mount_super(path->mnt->mnt_sb);
	struct fuse_conn *fc = parent_fm->fc;
	struct fuse_mount *fm;
	struct vfsmount *mnt;
	struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));
	struct super_block *sb;
	int err;

	fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
	if (IS_ERR(fsc)) {
		err = PTR_ERR(fsc);
		goto out;
	}

	/* The submount gets its own fuse_mount but shares the parent's conn */
	err = -ENOMEM;
	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_put_fsc;

	refcount_set(&fm->count, 1);
	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, NULL, set_anon_super_fc);
	if (IS_ERR(sb)) {
		err = PTR_ERR(sb);
		/* sget_fc() failed, so fm is still ours to drop */
		fuse_mount_put(fm);
		goto out_put_fsc;
	}
	fm->fc = fuse_conn_get(fc);

	/* Initialize superblock, making @mp_fi its root */
	err = fuse_fill_super_submount(sb, mp_fi);
	if (err) {
		/* Undo by hand: clear s_fs_info so sb teardown won't touch fm */
		fuse_conn_put(fc);
		kfree(fm);
		sb->s_fs_info = NULL;
		goto out_put_sb;
	}

	/* Make the new mount visible to fuse connection teardown */
	down_write(&fc->killsb);
	list_add_tail(&fm->fc_entry, &fc->mounts);
	up_write(&fc->killsb);

	sb->s_flags |= SB_ACTIVE;
	fsc->root = dget(sb->s_root);

	/*
	 * FIXME: setting SB_BORN requires a write barrier for
	 * super_cache_count(). We should actually come
	 * up with a proper ->get_tree() implementation
	 * for submounts and call vfs_get_tree() to take
	 * care of the write barrier.
	 */
	smp_wmb();
	sb->s_flags |= SB_BORN;

	/* We are done configuring the superblock, so unlock it */
	up_write(&sb->s_umount);

	/* Create the submount */
	mnt = vfs_create_mount(fsc);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out_put_fsc;
	}
	mntget(mnt);
	put_fs_context(fsc);
	return mnt;

out_put_sb:
	/*
	 * Only jump here when fsc->root is NULL and sb is still locked
	 * (otherwise put_fs_context() will put the superblock)
	 */
	deactivate_locked_super(sb);
out_put_fsc:
	put_fs_context(fsc);
out:
	return ERR_PTR(err);
}
391*4882a593Smuzhiyun
/*
 * Get the canonical path. Since we must translate to a path, this must be done
 * in the context of the userspace daemon, however, the userspace daemon cannot
 * look up paths on its own. Instead, we handle the lookup as a special case
 * inside of the write request.
 */
static void fuse_dentry_canonical_path(const struct path *path,
				       struct path *canonical_path)
{
	struct inode *inode = d_inode(path->dentry);
	struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
	FUSE_ARGS(args);
	char *path_name;
	int err;

	/* Scratch page for the daemon to write the resolved path into */
	path_name = (char *)get_zeroed_page(GFP_KERNEL);
	if (!path_name)
		goto default_path;

	args.opcode = FUSE_CANONICAL_PATH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 0;
	args.out_numargs = 1;
	args.out_args[0].size = PATH_MAX;
	args.out_args[0].value = path_name;
	args.canonical_path = canonical_path;
	args.out_argvar = 1;

	err = fuse_simple_request(fm, &args);
	free_page((unsigned long)path_name);
	/* Positive return: request machinery filled in *canonical_path */
	if (err > 0)
		return;
default_path:
	/* Fall back to the path we were handed */
	canonical_path->dentry = path->dentry;
	canonical_path->mnt = path->mnt;
	path_get(canonical_path);
}
430*4882a593Smuzhiyun
/* Dentry operations for all fuse dentries except the root */
const struct dentry_operations fuse_dentry_operations = {
	.d_revalidate	= fuse_dentry_revalidate,
	.d_delete	= fuse_dentry_delete,
#if BITS_PER_LONG < 64
	.d_init		= fuse_dentry_init,
	.d_release	= fuse_dentry_release,
#endif
	.d_automount	= fuse_dentry_automount,
	.d_canonical_path = fuse_dentry_canonical_path,
};
441*4882a593Smuzhiyun
/* The root dentry never expires, so it gets no revalidate/delete ops */
const struct dentry_operations fuse_root_dentry_operations = {
#if BITS_PER_LONG < 64
	.d_init		= fuse_dentry_init,
	.d_release	= fuse_dentry_release,
#endif
};
448*4882a593Smuzhiyun
fuse_valid_type(int m)449*4882a593Smuzhiyun int fuse_valid_type(int m)
450*4882a593Smuzhiyun {
451*4882a593Smuzhiyun return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
452*4882a593Smuzhiyun S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
453*4882a593Smuzhiyun }
454*4882a593Smuzhiyun
fuse_invalid_attr(struct fuse_attr * attr)455*4882a593Smuzhiyun bool fuse_invalid_attr(struct fuse_attr *attr)
456*4882a593Smuzhiyun {
457*4882a593Smuzhiyun return !fuse_valid_type(attr->mode) ||
458*4882a593Smuzhiyun attr->size > LLONG_MAX;
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun
/*
 * Send a LOOKUP request for @name under @nodeid and instantiate the
 * resulting inode.
 *
 * Returns 0 with *inode set on success.  A reply with a zero nodeid is
 * a valid negative result: 0 is returned with *inode == NULL and
 * @outarg holding the (valid) entry timeout.  On failure *inode is
 * NULL and a negative errno is returned.
 *
 * (The former second "!outarg->nodeid" test was unreachable -- the
 * first check already jumps out when the nodeid is zero -- and has
 * been removed.)
 */
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
		     struct fuse_entry_out *outarg, struct inode **inode)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_forget_link *forget;
	u64 attr_version;
	int err;

	*inode = NULL;
	err = -ENAMETOOLONG;
	if (name->len > FUSE_NAME_MAX)
		goto out;

	/* Pre-allocate so a failed fuse_iget() can still queue a FORGET */
	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget)
		goto out;

	attr_version = fuse_get_attr_version(fm->fc);

	fuse_lookup_init(fm->fc, &args, nodeid, name, outarg);
	err = fuse_simple_request(fm, &args);
	/* Zero nodeid is same as -ENOENT, but with valid timeout */
	if (err || !outarg->nodeid)
		goto out_put_forget;

	err = -EIO;
	if (fuse_invalid_attr(&outarg->attr))
		goto out_put_forget;

	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
			   &outarg->attr, entry_attr_timeout(outarg),
			   attr_version);
	err = -ENOMEM;
	if (!*inode) {
		/* Drop the lookup count the server just took for us */
		fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
		goto out;
	}
	err = 0;

 out_put_forget:
	kfree(forget);
 out:
	return err;
}
510*4882a593Smuzhiyun
/* ->lookup() callback: look up @entry in @dir via FUSE_LOOKUP */
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
				  unsigned int flags)
{
	int err;
	struct fuse_entry_out outarg;
	struct inode *inode;
	struct dentry *newent;
	bool outarg_valid = true;
	bool locked;

	if (fuse_is_bad(dir))
		return ERR_PTR(-EIO);

	locked = fuse_lock_inode(dir);
	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
			       &outarg, &inode);
	fuse_unlock_inode(dir, locked);
	/* -ENOENT: negative entry, but outarg carries no usable timeout */
	if (err == -ENOENT) {
		outarg_valid = false;
		err = 0;
	}
	if (err)
		goto out_err;

	/* The server must never hand out the root id from a lookup */
	err = -EIO;
	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
		goto out_iput;

	newent = d_splice_alias(inode, entry);
	err = PTR_ERR(newent);
	if (IS_ERR(newent))
		goto out_err;

	/* d_splice_alias() may have returned an alias to use instead */
	entry = newent ? newent : entry;
	if (outarg_valid)
		fuse_change_entry_timeout(entry, &outarg);
	else
		fuse_invalidate_entry_cache(entry);

	if (inode)
		fuse_advise_use_readdirplus(dir);
	return newent;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
559*4882a593Smuzhiyun
/*
 * Atomic create+open operation
 *
 * If the filesystem doesn't support this, then fall back to separate
 * 'mknod' + 'open' requests.
 */
static int fuse_create_open(struct inode *dir, struct dentry *entry,
			    struct file *file, unsigned flags,
			    umode_t mode)
{
	int err;
	struct inode *inode;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);
	struct fuse_forget_link *forget;
	struct fuse_create_in inarg;
	struct fuse_open_out outopen;
	struct fuse_entry_out outentry;
	struct fuse_inode *fi;
	struct fuse_file *ff;

	/* Userspace expects S_IFREG in create mode */
	BUG_ON((mode & S_IFMT) != S_IFREG);

	/* Pre-allocate so a failed fuse_iget() can still queue a FORGET */
	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget)
		goto out_err;

	err = -ENOMEM;
	ff = fuse_file_alloc(fm);
	if (!ff)
		goto out_put_forget_req;

	/* If the server doesn't apply the umask itself, apply it here */
	if (!fm->fc->dont_mask)
		mode &= ~current_umask();

	flags &= ~O_NOCTTY;
	memset(&inarg, 0, sizeof(inarg));
	memset(&outentry, 0, sizeof(outentry));
	inarg.flags = flags;
	inarg.mode = mode;
	inarg.umask = current_umask();
	args.opcode = FUSE_CREATE;
	args.nodeid = get_node_id(dir);
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	args.out_numargs = 2;
	args.out_args[0].size = sizeof(outentry);
	args.out_args[0].value = &outentry;
	args.out_args[1].size = sizeof(outopen);
	args.out_args[1].value = &outopen;
	err = fuse_simple_request(fm, &args);
	if (err)
		goto out_free_ff;

	/* Reject bogus replies: must be a regular file with a sane nodeid */
	err = -EIO;
	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
	    fuse_invalid_attr(&outentry.attr))
		goto out_free_ff;

	ff->fh = outopen.fh;
	ff->nodeid = outentry.nodeid;
	ff->open_flags = outopen.open_flags;
	fuse_passthrough_setup(fc, ff, &outopen);
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr, entry_attr_timeout(&outentry), 0);
	if (!inode) {
		/* Undo both the open (RELEASE) and the lookup count (FORGET) */
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		fuse_sync_release(NULL, ff, flags);
		fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
		err = -ENOMEM;
		goto out_err;
	}
	kfree(forget);
	d_instantiate(entry, inode);
	fuse_change_entry_timeout(entry, &outentry);
	fuse_dir_changed(dir);
	err = finish_open(file, entry, generic_file_open);
	if (err) {
		fi = get_fuse_inode(inode);
		fuse_sync_release(fi, ff, flags);
	} else {
		file->private_data = ff;
		fuse_finish_open(inode, file);
	}
	return err;

out_free_ff:
	fuse_file_free(ff);
out_put_forget_req:
	kfree(forget);
out_err:
	return err;
}
659*4882a593Smuzhiyun
static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
/*
 * ->atomic_open() callback: look up, then either open an existing file
 * or create+open via FUSE_CREATE, falling back to mknod+open when the
 * server doesn't implement FUSE_CREATE.
 */
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
			    struct file *file, unsigned flags,
			    umode_t mode)
{
	int err;
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct dentry *res = NULL;

	if (fuse_is_bad(dir))
		return -EIO;

	if (d_in_lookup(entry)) {
		res = fuse_lookup(dir, entry, 0);
		if (IS_ERR(res))
			return PTR_ERR(res);

		if (res)
			entry = res;
	}

	if (!(flags & O_CREAT) || d_really_is_positive(entry))
		goto no_open;

	/* Only creates */
	file->f_mode |= FMODE_CREATED;

	if (fc->no_create)
		goto mknod;

	err = fuse_create_open(dir, entry, file, flags, mode);
	if (err == -ENOSYS) {
		/* Server has no FUSE_CREATE: remember and fall back */
		fc->no_create = 1;
		goto mknod;
	}
out_dput:
	dput(res);
	return err;

mknod:
	err = fuse_mknod(dir, entry, mode, 0);
	if (err)
		goto out_dput;
no_open:
	return finish_no_open(file, res);
}
706*4882a593Smuzhiyun
707*4882a593Smuzhiyun /*
708*4882a593Smuzhiyun * Code shared between mknod, mkdir, symlink and link
709*4882a593Smuzhiyun */
/*
 * Send a prepared create-type request (args filled in by the caller)
 * and instantiate the resulting inode/dentry.  The caller has set up
 * the opcode and in_args; this adds the nodeid and the entry_out reply.
 */
static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
			    struct inode *dir, struct dentry *entry,
			    umode_t mode)
{
	struct fuse_entry_out outarg;
	struct inode *inode;
	struct dentry *d;
	int err;
	struct fuse_forget_link *forget;

	if (fuse_is_bad(dir))
		return -EIO;

	/*
	 * Pre-allocate the forget request: if iget fails after the server
	 * has created the node, we must still be able to tell the server
	 * to forget it.
	 */
	forget = fuse_alloc_forget();
	if (!forget)
		return -ENOMEM;

	memset(&outarg, 0, sizeof(outarg));
	args->nodeid = get_node_id(dir);
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(outarg);
	args->out_args[0].value = &outarg;
	err = fuse_simple_request(fm, args);
	if (err)
		goto out_put_forget_req;

	err = -EIO;
	if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
		goto out_put_forget_req;

	/* The server must return the same file type that was requested */
	if ((outarg.attr.mode ^ mode) & S_IFMT)
		goto out_put_forget_req;

	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
			  &outarg.attr, entry_attr_timeout(&outarg), 0);
	if (!inode) {
		/* Out of memory: have the server drop its reference */
		fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
		return -ENOMEM;
	}
	kfree(forget);

	d_drop(entry);
	d = d_splice_alias(inode, entry);
	if (IS_ERR(d))
		return PTR_ERR(d);

	if (d) {
		/* An existing alias was used instead of 'entry' */
		fuse_change_entry_timeout(d, &outarg);
		dput(d);
	} else {
		fuse_change_entry_timeout(entry, &outarg);
	}
	fuse_dir_changed(dir);
	return 0;

out_put_forget_req:
	kfree(forget);
	return err;
}
769*4882a593Smuzhiyun
/*
 * Create a special file / regular file via FUSE_MKNOD.
 */
static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
		      dev_t rdev)
{
	struct fuse_mount *fm = get_fuse_mount(dir);
	struct fuse_mknod_in in;
	FUSE_ARGS(args);

	/* Apply the umask locally unless the server wants to do it itself */
	if (!fm->fc->dont_mask)
		mode &= ~current_umask();

	memset(&in, 0, sizeof(in));
	in.mode = mode;
	in.rdev = new_encode_dev(rdev);
	in.umask = current_umask();

	args.opcode = FUSE_MKNOD;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(in);
	args.in_args[0].value = &in;
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;

	return create_new_entry(fm, &args, dir, entry, mode);
}
792*4882a593Smuzhiyun
/*
 * ->create() fallback used when atomic open isn't taken: emulate
 * regular file creation with FUSE_MKNOD.
 * NOTE(review): 'excl' is ignored here — O_EXCL semantics are
 * presumably handled by the VFS lookup/open path; confirm.
 */
static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
		       bool excl)
{
	return fuse_mknod(dir, entry, mode, 0);
}
798*4882a593Smuzhiyun
/*
 * Create a directory via FUSE_MKDIR.
 */
static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
{
	struct fuse_mount *fm = get_fuse_mount(dir);
	struct fuse_mkdir_in in;
	FUSE_ARGS(args);

	/* Apply the umask locally unless the server wants to do it itself */
	if (!fm->fc->dont_mask)
		mode &= ~current_umask();

	memset(&in, 0, sizeof(in));
	in.mode = mode;
	in.umask = current_umask();

	args.opcode = FUSE_MKDIR;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(in);
	args.in_args[0].value = &in;
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;

	return create_new_entry(fm, &args, dir, entry, S_IFDIR);
}
819*4882a593Smuzhiyun
fuse_symlink(struct inode * dir,struct dentry * entry,const char * link)820*4882a593Smuzhiyun static int fuse_symlink(struct inode *dir, struct dentry *entry,
821*4882a593Smuzhiyun const char *link)
822*4882a593Smuzhiyun {
823*4882a593Smuzhiyun struct fuse_mount *fm = get_fuse_mount(dir);
824*4882a593Smuzhiyun unsigned len = strlen(link) + 1;
825*4882a593Smuzhiyun FUSE_ARGS(args);
826*4882a593Smuzhiyun
827*4882a593Smuzhiyun args.opcode = FUSE_SYMLINK;
828*4882a593Smuzhiyun args.in_numargs = 2;
829*4882a593Smuzhiyun args.in_args[0].size = entry->d_name.len + 1;
830*4882a593Smuzhiyun args.in_args[0].value = entry->d_name.name;
831*4882a593Smuzhiyun args.in_args[1].size = len;
832*4882a593Smuzhiyun args.in_args[1].value = link;
833*4882a593Smuzhiyun return create_new_entry(fm, &args, dir, entry, S_IFLNK);
834*4882a593Smuzhiyun }
835*4882a593Smuzhiyun
fuse_flush_time_update(struct inode * inode)836*4882a593Smuzhiyun void fuse_flush_time_update(struct inode *inode)
837*4882a593Smuzhiyun {
838*4882a593Smuzhiyun int err = sync_inode_metadata(inode, 1);
839*4882a593Smuzhiyun
840*4882a593Smuzhiyun mapping_set_error(inode->i_mapping, err);
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun
fuse_update_ctime(struct inode * inode)843*4882a593Smuzhiyun void fuse_update_ctime(struct inode *inode)
844*4882a593Smuzhiyun {
845*4882a593Smuzhiyun if (!IS_NOCMTIME(inode)) {
846*4882a593Smuzhiyun inode->i_ctime = current_time(inode);
847*4882a593Smuzhiyun mark_inode_dirty_sync(inode);
848*4882a593Smuzhiyun fuse_flush_time_update(inode);
849*4882a593Smuzhiyun }
850*4882a593Smuzhiyun }
851*4882a593Smuzhiyun
/*
 * Remove a name via FUSE_UNLINK and bring the cached inode and dentry
 * state in line with the result.
 */
static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);

	if (fuse_is_bad(dir))
		return -EIO;

	args.opcode = FUSE_UNLINK;
	args.nodeid = get_node_id(dir);
	args.in_numargs = 1;
	args.in_args[0].size = entry->d_name.len + 1;
	args.in_args[0].value = entry->d_name.name;
	err = fuse_simple_request(fm, &args);
	if (!err) {
		struct inode *inode = d_inode(entry);
		struct fuse_inode *fi = get_fuse_inode(inode);

		/* attr_version bump + nlink change must be atomic wrt fi->lock */
		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fm->fc->attr_version);
		/*
		 * If i_nlink == 0 then unlink doesn't make sense, yet this can
		 * happen if userspace filesystem is careless. It would be
		 * difficult to enforce correct nlink usage so just ignore this
		 * condition here
		 */
		if (inode->i_nlink > 0)
			drop_nlink(inode);
		spin_unlock(&fi->lock);
		fuse_invalidate_attr(inode);
		fuse_dir_changed(dir);
		fuse_invalidate_entry_cache(entry);
		fuse_update_ctime(inode);
	} else if (err == -EINTR)
		/* Can't tell whether the unlink happened; drop the dentry */
		fuse_invalidate_entry(entry);
	return err;
}
890*4882a593Smuzhiyun
fuse_rmdir(struct inode * dir,struct dentry * entry)891*4882a593Smuzhiyun static int fuse_rmdir(struct inode *dir, struct dentry *entry)
892*4882a593Smuzhiyun {
893*4882a593Smuzhiyun int err;
894*4882a593Smuzhiyun struct fuse_mount *fm = get_fuse_mount(dir);
895*4882a593Smuzhiyun FUSE_ARGS(args);
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun if (fuse_is_bad(dir))
898*4882a593Smuzhiyun return -EIO;
899*4882a593Smuzhiyun
900*4882a593Smuzhiyun args.opcode = FUSE_RMDIR;
901*4882a593Smuzhiyun args.nodeid = get_node_id(dir);
902*4882a593Smuzhiyun args.in_numargs = 1;
903*4882a593Smuzhiyun args.in_args[0].size = entry->d_name.len + 1;
904*4882a593Smuzhiyun args.in_args[0].value = entry->d_name.name;
905*4882a593Smuzhiyun err = fuse_simple_request(fm, &args);
906*4882a593Smuzhiyun if (!err) {
907*4882a593Smuzhiyun clear_nlink(d_inode(entry));
908*4882a593Smuzhiyun fuse_dir_changed(dir);
909*4882a593Smuzhiyun fuse_invalidate_entry_cache(entry);
910*4882a593Smuzhiyun } else if (err == -EINTR)
911*4882a593Smuzhiyun fuse_invalidate_entry(entry);
912*4882a593Smuzhiyun return err;
913*4882a593Smuzhiyun }
914*4882a593Smuzhiyun
/*
 * Common implementation for FUSE_RENAME and FUSE_RENAME2.
 * 'argsize' selects how much of fuse_rename2_in is sent (the legacy
 * opcode uses the smaller fuse_rename_in prefix).
 */
static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
			      struct inode *newdir, struct dentry *newent,
			      unsigned int flags, int opcode, size_t argsize)
{
	int err;
	struct fuse_rename2_in inarg;
	struct fuse_mount *fm = get_fuse_mount(olddir);
	FUSE_ARGS(args);

	memset(&inarg, 0, argsize);
	inarg.newdir = get_node_id(newdir);
	inarg.flags = flags;
	args.opcode = opcode;
	args.nodeid = get_node_id(olddir);
	args.in_numargs = 3;
	args.in_args[0].size = argsize;
	args.in_args[0].value = &inarg;
	args.in_args[1].size = oldent->d_name.len + 1;
	args.in_args[1].value = oldent->d_name.name;
	args.in_args[2].size = newent->d_name.len + 1;
	args.in_args[2].value = newent->d_name.name;
	err = fuse_simple_request(fm, &args);
	if (!err) {
		/* ctime changes */
		fuse_invalidate_attr(d_inode(oldent));
		fuse_update_ctime(d_inode(oldent));

		/* On exchange both inodes changed ctime */
		if (flags & RENAME_EXCHANGE) {
			fuse_invalidate_attr(d_inode(newent));
			fuse_update_ctime(d_inode(newent));
		}

		fuse_dir_changed(olddir);
		if (olddir != newdir)
			fuse_dir_changed(newdir);

		/* newent will end up negative */
		if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) {
			fuse_invalidate_attr(d_inode(newent));
			fuse_invalidate_entry_cache(newent);
			fuse_update_ctime(d_inode(newent));
		}
	} else if (err == -EINTR) {
		/* If request was interrupted, DEITY only knows if the
		   rename actually took place. If the invalidation
		   fails (e.g. some process has CWD under the renamed
		   directory), then there can be inconsistency between
		   the dcache and the real filesystem. Tough luck. */
		fuse_invalidate_entry(oldent);
		if (d_really_is_positive(newent))
			fuse_invalidate_entry(newent);
	}

	return err;
}
970*4882a593Smuzhiyun
/*
 * ->rename2(): use FUSE_RENAME2 when flags are given (requires protocol
 * minor >= 23 and server support), otherwise the legacy FUSE_RENAME.
 */
static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
			struct inode *newdir, struct dentry *newent,
			unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(olddir);
	int err;

	if (fuse_is_bad(olddir))
		return -EIO;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags) {
		if (fc->no_rename2 || fc->minor < 23)
			return -EINVAL;

		err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
					 FUSE_RENAME2,
					 sizeof(struct fuse_rename2_in));
		if (err == -ENOSYS) {
			/* Server lacks FUSE_RENAME2: remember for next time */
			fc->no_rename2 = 1;
			err = -EINVAL;
		}
	} else {
		err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
					 FUSE_RENAME,
					 sizeof(struct fuse_rename_in));
	}

	return err;
}
1003*4882a593Smuzhiyun
/*
 * Create a hard link via FUSE_LINK (goes through create_new_entry()
 * since the reply instantiates a dentry just like mknod/mkdir).
 */
static int fuse_link(struct dentry *entry, struct inode *newdir,
		     struct dentry *newent)
{
	int err;
	struct fuse_link_in inarg;
	struct inode *inode = d_inode(entry);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.oldnodeid = get_node_id(inode);
	args.opcode = FUSE_LINK;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.in_args[1].size = newent->d_name.len + 1;
	args.in_args[1].value = newent->d_name.name;
	err = create_new_entry(fm, &args, newdir, newent, inode->i_mode);
	/* Contrary to "normal" filesystems it can happen that link
	   makes two "logical" inodes point to the same "physical"
	   inode. We invalidate the attributes of the old one, so it
	   will reflect changes in the backing inode (link count,
	   etc.)
	*/
	if (!err) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		/* attr_version bump + nlink change must be atomic wrt fi->lock */
		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fm->fc->attr_version);
		if (likely(inode->i_nlink < UINT_MAX))
			inc_nlink(inode);
		spin_unlock(&fi->lock);
		fuse_invalidate_attr(inode);
		fuse_update_ctime(inode);
	} else if (err == -EINTR) {
		/* Link may or may not have happened; refresh attrs later */
		fuse_invalidate_attr(inode);
	}
	return err;
}
1043*4882a593Smuzhiyun
fuse_fillattr(struct inode * inode,struct fuse_attr * attr,struct kstat * stat)1044*4882a593Smuzhiyun static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
1045*4882a593Smuzhiyun struct kstat *stat)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun unsigned int blkbits;
1048*4882a593Smuzhiyun struct fuse_conn *fc = get_fuse_conn(inode);
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun /* see the comment in fuse_change_attributes() */
1051*4882a593Smuzhiyun if (fc->writeback_cache && S_ISREG(inode->i_mode)) {
1052*4882a593Smuzhiyun attr->size = i_size_read(inode);
1053*4882a593Smuzhiyun attr->mtime = inode->i_mtime.tv_sec;
1054*4882a593Smuzhiyun attr->mtimensec = inode->i_mtime.tv_nsec;
1055*4882a593Smuzhiyun attr->ctime = inode->i_ctime.tv_sec;
1056*4882a593Smuzhiyun attr->ctimensec = inode->i_ctime.tv_nsec;
1057*4882a593Smuzhiyun }
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun stat->dev = inode->i_sb->s_dev;
1060*4882a593Smuzhiyun stat->ino = attr->ino;
1061*4882a593Smuzhiyun stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
1062*4882a593Smuzhiyun stat->nlink = attr->nlink;
1063*4882a593Smuzhiyun stat->uid = make_kuid(fc->user_ns, attr->uid);
1064*4882a593Smuzhiyun stat->gid = make_kgid(fc->user_ns, attr->gid);
1065*4882a593Smuzhiyun stat->rdev = inode->i_rdev;
1066*4882a593Smuzhiyun stat->atime.tv_sec = attr->atime;
1067*4882a593Smuzhiyun stat->atime.tv_nsec = attr->atimensec;
1068*4882a593Smuzhiyun stat->mtime.tv_sec = attr->mtime;
1069*4882a593Smuzhiyun stat->mtime.tv_nsec = attr->mtimensec;
1070*4882a593Smuzhiyun stat->ctime.tv_sec = attr->ctime;
1071*4882a593Smuzhiyun stat->ctime.tv_nsec = attr->ctimensec;
1072*4882a593Smuzhiyun stat->size = attr->size;
1073*4882a593Smuzhiyun stat->blocks = attr->blocks;
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun if (attr->blksize != 0)
1076*4882a593Smuzhiyun blkbits = ilog2(attr->blksize);
1077*4882a593Smuzhiyun else
1078*4882a593Smuzhiyun blkbits = inode->i_sb->s_blocksize_bits;
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun stat->blksize = 1 << blkbits;
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun
/*
 * Issue a FUSE_GETATTR to the server and refresh the cached inode
 * attributes.  'stat' (optional) receives the result; 'file' (optional)
 * supplies a file handle for regular files.
 */
static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
			   struct file *file)
{
	int err;
	struct fuse_getattr_in inarg;
	struct fuse_attr_out outarg;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	u64 attr_version;

	/* Snapshot before the request so stale replies can be detected */
	attr_version = fuse_get_attr_version(fm->fc);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	/* Directories have separate file-handle space */
	if (file && S_ISREG(inode->i_mode)) {
		struct fuse_file *ff = file->private_data;

		inarg.getattr_flags |= FUSE_GETATTR_FH;
		inarg.fh = ff->fh;
	}
	args.opcode = FUSE_GETATTR;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err) {
		/* A changed file type means the server swapped the node */
		if (fuse_invalid_attr(&outarg.attr) ||
		    inode_wrong_type(inode, outarg.attr.mode)) {
			fuse_make_bad(inode);
			err = -EIO;
		} else {
			fuse_change_attributes(inode, &outarg.attr,
					       attr_timeout(&outarg),
					       attr_version);
			if (stat)
				fuse_fillattr(inode, &outarg.attr, stat);
		}
	}
	return err;
}
1128*4882a593Smuzhiyun
/*
 * Refresh attributes if needed and optionally fill 'stat'.  A server
 * round trip happens when forced by statx flags, when a requested
 * attribute has been invalidated, or when the attribute cache expired.
 */
static int fuse_update_get_attr(struct inode *inode, struct file *file,
				struct kstat *stat, u32 request_mask,
				unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	bool sync;

	/* Explicit statx flags win; FORCE takes precedence over DONT */
	if (flags & AT_STATX_FORCE_SYNC)
		sync = true;
	else if (flags & AT_STATX_DONT_SYNC)
		sync = false;
	else
		sync = (request_mask & READ_ONCE(fi->inval_mask)) ||
		       time_before64(fi->i_time, get_jiffies_64());

	if (sync) {
		forget_all_cached_acls(inode);
		err = fuse_do_getattr(inode, stat, file);
	} else if (stat) {
		generic_fillattr(inode, stat);
		/* Report the server's original mode/ino, not local overrides */
		stat->mode = fi->orig_i_mode;
		stat->ino = fi->orig_ino;
	}

	return err;
}
1157*4882a593Smuzhiyun
/*
 * Refresh cached attributes for internal use (no kstat wanted).
 */
int fuse_update_attributes(struct inode *inode, struct file *file)
{
	/* Do *not* need to get atime for internal purposes */
	return fuse_update_get_attr(inode, file, NULL,
				    STATX_BASIC_STATS & ~STATX_ATIME, 0);
}
1164*4882a593Smuzhiyun
/*
 * Server-initiated invalidation of a directory entry (and optionally
 * the child inode it points to).  Called from the notify path with the
 * nodeid of the parent directory and the entry's name.
 *
 * If 'child_nodeid' is nonzero, the positive dentry is unhashed only
 * when it still refers to that node, is not a mountpoint and (for a
 * directory) is empty of cached children.
 */
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir;
	struct dentry *entry;

	parent = fuse_ilookup(fc, parent_nodeid, NULL);
	if (!parent)
		return -ENOENT;

	/* Same lock class as VFS directory operations on the parent */
	inode_lock_nested(parent, I_MUTEX_PARENT);
	if (!S_ISDIR(parent->i_mode))
		goto unlock;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto unlock;

	name->hash = full_name_hash(dir, name->name, name->len);
	entry = d_lookup(dir, name);
	dput(dir);
	if (!entry)
		goto unlock;

	fuse_dir_changed(parent);
	fuse_invalidate_entry(entry);

	if (child_nodeid != 0 && d_really_is_positive(entry)) {
		inode_lock(d_inode(entry));
		/* Entry was replaced in the meantime: nothing to kill */
		if (get_node_id(d_inode(entry)) != child_nodeid) {
			err = -ENOENT;
			goto badentry;
		}
		if (d_mountpoint(entry)) {
			err = -EBUSY;
			goto badentry;
		}
		if (d_is_dir(entry)) {
			shrink_dcache_parent(entry);
			if (!simple_empty(entry)) {
				err = -ENOTEMPTY;
				goto badentry;
			}
			d_inode(entry)->i_flags |= S_DEAD;
		}
		dont_mount(entry);
		clear_nlink(d_inode(entry));
		err = 0;
 badentry:
		inode_unlock(d_inode(entry));
		if (!err)
			d_delete(entry);
	} else {
		err = 0;
	}
	dput(entry);

 unlock:
	inode_unlock(parent);
	iput(parent);
	return err;
}
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun /*
1232*4882a593Smuzhiyun * Calling into a user-controlled filesystem gives the filesystem
1233*4882a593Smuzhiyun * daemon ptrace-like capabilities over the current process. This
1234*4882a593Smuzhiyun * means, that the filesystem daemon is able to record the exact
1235*4882a593Smuzhiyun * filesystem operations performed, and can also control the behavior
1236*4882a593Smuzhiyun * of the requester process in otherwise impossible ways. For example
1237*4882a593Smuzhiyun * it can delay the operation for arbitrary length of time allowing
1238*4882a593Smuzhiyun * DoS against the requester.
1239*4882a593Smuzhiyun *
1240*4882a593Smuzhiyun * For this reason only those processes can call into the filesystem,
1241*4882a593Smuzhiyun * for which the owner of the mount has ptrace privilege. This
1242*4882a593Smuzhiyun * excludes processes started by other users, suid or sgid processes.
1243*4882a593Smuzhiyun */
fuse_allow_current_process(struct fuse_conn * fc)1244*4882a593Smuzhiyun int fuse_allow_current_process(struct fuse_conn *fc)
1245*4882a593Smuzhiyun {
1246*4882a593Smuzhiyun const struct cred *cred;
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun if (fc->allow_other)
1249*4882a593Smuzhiyun return current_in_userns(fc->user_ns);
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun cred = current_cred();
1252*4882a593Smuzhiyun if (uid_eq(cred->euid, fc->user_id) &&
1253*4882a593Smuzhiyun uid_eq(cred->suid, fc->user_id) &&
1254*4882a593Smuzhiyun uid_eq(cred->uid, fc->user_id) &&
1255*4882a593Smuzhiyun gid_eq(cred->egid, fc->group_id) &&
1256*4882a593Smuzhiyun gid_eq(cred->sgid, fc->group_id) &&
1257*4882a593Smuzhiyun gid_eq(cred->gid, fc->group_id))
1258*4882a593Smuzhiyun return 1;
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun return 0;
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun
fuse_access(struct inode * inode,int mask)1263*4882a593Smuzhiyun static int fuse_access(struct inode *inode, int mask)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun struct fuse_mount *fm = get_fuse_mount(inode);
1266*4882a593Smuzhiyun FUSE_ARGS(args);
1267*4882a593Smuzhiyun struct fuse_access_in inarg;
1268*4882a593Smuzhiyun int err;
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun BUG_ON(mask & MAY_NOT_BLOCK);
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun if (fm->fc->no_access)
1273*4882a593Smuzhiyun return 0;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun memset(&inarg, 0, sizeof(inarg));
1276*4882a593Smuzhiyun inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
1277*4882a593Smuzhiyun args.opcode = FUSE_ACCESS;
1278*4882a593Smuzhiyun args.nodeid = get_node_id(inode);
1279*4882a593Smuzhiyun args.in_numargs = 1;
1280*4882a593Smuzhiyun args.in_args[0].size = sizeof(inarg);
1281*4882a593Smuzhiyun args.in_args[0].value = &inarg;
1282*4882a593Smuzhiyun err = fuse_simple_request(fm, &args);
1283*4882a593Smuzhiyun if (err == -ENOSYS) {
1284*4882a593Smuzhiyun fm->fc->no_access = 1;
1285*4882a593Smuzhiyun err = 0;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun return err;
1288*4882a593Smuzhiyun }
1289*4882a593Smuzhiyun
/*
 * Refresh attributes for a permission check.  Must not be called in
 * RCU-walk mode (the request blocks), hence the -ECHILD bail-out.
 */
static int fuse_perm_getattr(struct inode *inode, int mask)
{
	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	forget_all_cached_acls(inode);
	return fuse_do_getattr(inode, NULL, NULL);
}
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun /*
1300*4882a593Smuzhiyun * Check permission. The two basic access models of FUSE are:
1301*4882a593Smuzhiyun *
1302*4882a593Smuzhiyun * 1) Local access checking ('default_permissions' mount option) based
1303*4882a593Smuzhiyun * on file mode. This is the plain old disk filesystem permission
1304*4882a593Smuzhiyun * modell.
1305*4882a593Smuzhiyun *
1306*4882a593Smuzhiyun * 2) "Remote" access checking, where server is responsible for
1307*4882a593Smuzhiyun * checking permission in each inode operation. An exception to this
1308*4882a593Smuzhiyun * is if ->permission() was invoked from sys_access() in which case an
1309*4882a593Smuzhiyun * access request is sent. Execute permission is still checked
1310*4882a593Smuzhiyun * locally based on file mode.
1311*4882a593Smuzhiyun */
static int fuse_permission(struct inode *inode, int mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	bool refreshed = false;
	int err = 0;

	if (fuse_is_bad(inode))
		return -EIO;

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	/*
	 * If attributes are needed, refresh them before proceeding
	 */
	if (fc->default_permissions ||
	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
		struct fuse_inode *fi = get_fuse_inode(inode);
		u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;

		/* Refresh when mode/uid/gid were invalidated or cache expired */
		if (perm_mask & READ_ONCE(fi->inval_mask) ||
		    time_before64(fi->i_time, get_jiffies_64())) {
			refreshed = true;

			err = fuse_perm_getattr(inode, mask);
			if (err)
				return err;
		}
	}

	if (fc->default_permissions) {
		err = generic_permission(inode, mask);

		/* If permission is denied, try to refresh file
		   attributes.  This is also needed, because the root
		   node will at first have no permissions */
		if (err == -EACCES && !refreshed) {
			err = fuse_perm_getattr(inode, mask);
			if (!err)
				err = generic_permission(inode, mask);
		}

		/* Note: the opposite of the above test does not
		   exist.  So if permissions are revoked this won't be
		   noticed immediately, only after the attribute
		   timeout has expired */
	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
		/* access()/chdir(): ask the server with FUSE_ACCESS */
		err = fuse_access(inode, mask);
	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
		/* Execute permission is still checked locally on file mode */
		if (!(inode->i_mode & S_IXUGO)) {
			if (refreshed)
				return -EACCES;

			err = fuse_perm_getattr(inode, mask);
			if (!err && !(inode->i_mode & S_IXUGO))
				return -EACCES;
		}
	}
	return err;
}
1372*4882a593Smuzhiyun
fuse_readlink_page(struct inode * inode,struct page * page)1373*4882a593Smuzhiyun static int fuse_readlink_page(struct inode *inode, struct page *page)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun struct fuse_mount *fm = get_fuse_mount(inode);
1376*4882a593Smuzhiyun struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
1377*4882a593Smuzhiyun struct fuse_args_pages ap = {
1378*4882a593Smuzhiyun .num_pages = 1,
1379*4882a593Smuzhiyun .pages = &page,
1380*4882a593Smuzhiyun .descs = &desc,
1381*4882a593Smuzhiyun };
1382*4882a593Smuzhiyun char *link;
1383*4882a593Smuzhiyun ssize_t res;
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun ap.args.opcode = FUSE_READLINK;
1386*4882a593Smuzhiyun ap.args.nodeid = get_node_id(inode);
1387*4882a593Smuzhiyun ap.args.out_pages = true;
1388*4882a593Smuzhiyun ap.args.out_argvar = true;
1389*4882a593Smuzhiyun ap.args.page_zeroing = true;
1390*4882a593Smuzhiyun ap.args.out_numargs = 1;
1391*4882a593Smuzhiyun ap.args.out_args[0].size = desc.length;
1392*4882a593Smuzhiyun res = fuse_simple_request(fm, &ap.args);
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun fuse_invalidate_atime(inode);
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun if (res < 0)
1397*4882a593Smuzhiyun return res;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun if (WARN_ON(res >= PAGE_SIZE))
1400*4882a593Smuzhiyun return -EIO;
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun link = page_address(page);
1403*4882a593Smuzhiyun link[res] = '\0';
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun return 0;
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
fuse_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * callback)1408*4882a593Smuzhiyun static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
1409*4882a593Smuzhiyun struct delayed_call *callback)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun struct fuse_conn *fc = get_fuse_conn(inode);
1412*4882a593Smuzhiyun struct page *page;
1413*4882a593Smuzhiyun int err;
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun err = -EIO;
1416*4882a593Smuzhiyun if (fuse_is_bad(inode))
1417*4882a593Smuzhiyun goto out_err;
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun if (fc->cache_symlinks)
1420*4882a593Smuzhiyun return page_get_link(dentry, inode, callback);
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun err = -ECHILD;
1423*4882a593Smuzhiyun if (!dentry)
1424*4882a593Smuzhiyun goto out_err;
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun page = alloc_page(GFP_KERNEL);
1427*4882a593Smuzhiyun err = -ENOMEM;
1428*4882a593Smuzhiyun if (!page)
1429*4882a593Smuzhiyun goto out_err;
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun err = fuse_readlink_page(inode, page);
1432*4882a593Smuzhiyun if (err) {
1433*4882a593Smuzhiyun __free_page(page);
1434*4882a593Smuzhiyun goto out_err;
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun set_delayed_call(callback, page_put_link, page);
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun return page_address(page);
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun out_err:
1442*4882a593Smuzhiyun return ERR_PTR(err);
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun
/* Directory ->open(): delegate to the common open path with isdir == true. */
static int fuse_dir_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, true);
}
1449*4882a593Smuzhiyun
/* Directory ->release(): delegate to the common release path (isdir). */
static int fuse_dir_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, true);

	return 0;
}
1456*4882a593Smuzhiyun
/*
 * Directory ->fsync(): send FUSE_FSYNCDIR under the inode lock.  An
 * -ENOSYS reply marks the connection as not supporting fsyncdir so
 * further calls short-circuit to success.
 */
static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int ret = 0;

	if (fuse_is_bad(inode))
		return -EIO;

	if (!fc->no_fsyncdir) {
		inode_lock(inode);
		ret = fuse_fsync_common(file, start, end, datasync,
					FUSE_FSYNCDIR);
		if (ret == -ENOSYS) {
			fc->no_fsyncdir = 1;
			ret = 0;
		}
		inode_unlock(inode);
	}

	return ret;
}
1480*4882a593Smuzhiyun
fuse_dir_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1481*4882a593Smuzhiyun static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
1482*4882a593Smuzhiyun unsigned long arg)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
1487*4882a593Smuzhiyun if (fc->minor < 18)
1488*4882a593Smuzhiyun return -ENOTTY;
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
fuse_dir_compat_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1493*4882a593Smuzhiyun static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
1494*4882a593Smuzhiyun unsigned long arg)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);
1497*4882a593Smuzhiyun
1498*4882a593Smuzhiyun if (fc->minor < 18)
1499*4882a593Smuzhiyun return -ENOTTY;
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun return fuse_ioctl_common(file, cmd, arg,
1502*4882a593Smuzhiyun FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun
/*
 * Decide whether mtime should be sent to the server in SETATTR.
 * Sent when explicitly set, when the kernel's i_mtime is authoritative,
 * and in every other case except open(O_TRUNC)/ftruncate() truncation.
 */
static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
{
	if ((ivalid & ATTR_MTIME_SET) || trust_local_mtime)
		return true;

	/* open(O_TRUNC) or ftruncate(): skip the mtime update */
	return !((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)));
}
1522*4882a593Smuzhiyun
/*
 * Translate a VFS struct iattr into the FUSE_SETATTR wire format,
 * converting uid/gid into the connection's user namespace.
 */
static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
			   struct fuse_setattr_in *arg, bool trust_local_cmtime)
{
	unsigned ivalid = iattr->ia_valid;

	if (ivalid & ATTR_MODE) {
		arg->valid |= FATTR_MODE;
		arg->mode = iattr->ia_mode;
	}
	if (ivalid & ATTR_UID) {
		arg->valid |= FATTR_UID;
		arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
	}
	if (ivalid & ATTR_GID) {
		arg->valid |= FATTR_GID;
		arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
	}
	if (ivalid & ATTR_SIZE) {
		arg->valid |= FATTR_SIZE;
		arg->size = iattr->ia_size;
	}
	if (ivalid & ATTR_ATIME) {
		arg->valid |= FATTR_ATIME;
		arg->atime = iattr->ia_atime.tv_sec;
		arg->atimensec = iattr->ia_atime.tv_nsec;
		/* "now" semantics unless an explicit time was supplied */
		if (!(ivalid & ATTR_ATIME_SET))
			arg->valid |= FATTR_ATIME_NOW;
	}
	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
		arg->valid |= FATTR_MTIME;
		arg->mtime = iattr->ia_mtime.tv_sec;
		arg->mtimensec = iattr->ia_mtime.tv_nsec;
		if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
			arg->valid |= FATTR_MTIME_NOW;
	}
	if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
		arg->valid |= FATTR_CTIME;
		arg->ctime = iattr->ia_ctime.tv_sec;
		arg->ctimensec = iattr->ia_ctime.tv_nsec;
	}
}
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun /*
1558*4882a593Smuzhiyun * Prevent concurrent writepages on inode
1559*4882a593Smuzhiyun *
1560*4882a593Smuzhiyun * This is done by adding a negative bias to the inode write counter
1561*4882a593Smuzhiyun * and waiting for all pending writes to finish.
1562*4882a593Smuzhiyun */
void fuse_set_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Caller must hold the inode lock */
	BUG_ON(!inode_is_locked(inode));

	spin_lock(&fi->lock);
	BUG_ON(fi->writectr < 0);
	/* Apply the negative bias to the write counter */
	fi->writectr += FUSE_NOWRITE;
	spin_unlock(&fi->lock);
	/* Counter equals the bare bias once all pending writes finished */
	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun /*
1577*4882a593Smuzhiyun * Allow writepages on inode
1578*4882a593Smuzhiyun *
1579*4882a593Smuzhiyun * Remove the bias from the writecounter and send any queued
1580*4882a593Smuzhiyun * writepages.
1581*4882a593Smuzhiyun */
/* Caller must hold fi->lock; see fuse_release_nowrite() for the locked API. */
static void __fuse_release_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Only valid when nowrite is set and no writes are in flight */
	BUG_ON(fi->writectr != FUSE_NOWRITE);
	fi->writectr = 0;
	fuse_flush_writepages(inode);
}
1590*4882a593Smuzhiyun
void fuse_release_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Take fi->lock and undo the FUSE_NOWRITE bias */
	spin_lock(&fi->lock);
	__fuse_release_nowrite(inode);
	spin_unlock(&fi->lock);
}
1599*4882a593Smuzhiyun
/*
 * Prepare a FUSE_SETATTR request: one input arg (fuse_setattr_in) and
 * one output arg (fuse_attr_out) carrying the refreshed attributes.
 */
static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
			      struct inode *inode,
			      struct fuse_setattr_in *inarg_p,
			      struct fuse_attr_out *outarg_p)
{
	args->opcode = FUSE_SETATTR;
	args->nodeid = get_node_id(inode);
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(*inarg_p);
	args->in_args[0].value = inarg_p;
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(*outarg_p);
	args->out_args[0].value = outarg_p;
}
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun /*
1616*4882a593Smuzhiyun * Flush inode->i_mtime to the server
1617*4882a593Smuzhiyun */
int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_setattr_in inarg;
	struct fuse_attr_out outarg;

	/* memset (not an initializer) so struct padding is zeroed too */
	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));

	inarg.valid = FATTR_MTIME;
	inarg.mtime = inode->i_mtime.tv_sec;
	inarg.mtimensec = inode->i_mtime.tv_nsec;
	/* ctime in SETATTR is only understood from protocol 7.23 on */
	if (fm->fc->minor >= 23) {
		inarg.valid |= FATTR_CTIME;
		inarg.ctime = inode->i_ctime.tv_sec;
		inarg.ctimensec = inode->i_ctime.tv_nsec;
	}
	/* Pass the file handle when flushing on behalf of an open file */
	if (ff) {
		inarg.valid |= FATTR_FH;
		inarg.fh = ff->fh;
	}
	fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);

	return fuse_simple_request(fm, &args);
}
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun /*
1646*4882a593Smuzhiyun * Set attributes, and at the same time refresh them.
1647*4882a593Smuzhiyun *
1648*4882a593Smuzhiyun * Truncation is slightly complicated, because the 'truncate' request
1649*4882a593Smuzhiyun * may fail, in which case we don't want to touch the mapping.
1650*4882a593Smuzhiyun * vmtruncate() doesn't allow for this case, so do the rlimit checking
1651*4882a593Smuzhiyun * and the actual truncation by hand.
1652*4882a593Smuzhiyun */
fuse_do_setattr(struct dentry * dentry,struct iattr * attr,struct file * file)1653*4882a593Smuzhiyun int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
1654*4882a593Smuzhiyun struct file *file)
1655*4882a593Smuzhiyun {
1656*4882a593Smuzhiyun struct inode *inode = d_inode(dentry);
1657*4882a593Smuzhiyun struct fuse_mount *fm = get_fuse_mount(inode);
1658*4882a593Smuzhiyun struct fuse_conn *fc = fm->fc;
1659*4882a593Smuzhiyun struct fuse_inode *fi = get_fuse_inode(inode);
1660*4882a593Smuzhiyun FUSE_ARGS(args);
1661*4882a593Smuzhiyun struct fuse_setattr_in inarg;
1662*4882a593Smuzhiyun struct fuse_attr_out outarg;
1663*4882a593Smuzhiyun bool is_truncate = false;
1664*4882a593Smuzhiyun bool is_wb = fc->writeback_cache;
1665*4882a593Smuzhiyun loff_t oldsize;
1666*4882a593Smuzhiyun int err;
1667*4882a593Smuzhiyun bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
1668*4882a593Smuzhiyun bool fault_blocked = false;
1669*4882a593Smuzhiyun
1670*4882a593Smuzhiyun if (!fc->default_permissions)
1671*4882a593Smuzhiyun attr->ia_valid |= ATTR_FORCE;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun err = setattr_prepare(dentry, attr);
1674*4882a593Smuzhiyun if (err)
1675*4882a593Smuzhiyun return err;
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun if (attr->ia_valid & ATTR_SIZE) {
1678*4882a593Smuzhiyun if (WARN_ON(!S_ISREG(inode->i_mode)))
1679*4882a593Smuzhiyun return -EIO;
1680*4882a593Smuzhiyun is_truncate = true;
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun
1683*4882a593Smuzhiyun if (FUSE_IS_DAX(inode) && is_truncate) {
1684*4882a593Smuzhiyun down_write(&fi->i_mmap_sem);
1685*4882a593Smuzhiyun fault_blocked = true;
1686*4882a593Smuzhiyun err = fuse_dax_break_layouts(inode, 0, 0);
1687*4882a593Smuzhiyun if (err) {
1688*4882a593Smuzhiyun up_write(&fi->i_mmap_sem);
1689*4882a593Smuzhiyun return err;
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun if (attr->ia_valid & ATTR_OPEN) {
1694*4882a593Smuzhiyun /* This is coming from open(..., ... | O_TRUNC); */
1695*4882a593Smuzhiyun WARN_ON(!(attr->ia_valid & ATTR_SIZE));
1696*4882a593Smuzhiyun WARN_ON(attr->ia_size != 0);
1697*4882a593Smuzhiyun if (fc->atomic_o_trunc) {
1698*4882a593Smuzhiyun /*
1699*4882a593Smuzhiyun * No need to send request to userspace, since actual
1700*4882a593Smuzhiyun * truncation has already been done by OPEN. But still
1701*4882a593Smuzhiyun * need to truncate page cache.
1702*4882a593Smuzhiyun */
1703*4882a593Smuzhiyun i_size_write(inode, 0);
1704*4882a593Smuzhiyun truncate_pagecache(inode, 0);
1705*4882a593Smuzhiyun goto out;
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun file = NULL;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun
1710*4882a593Smuzhiyun /* Flush dirty data/metadata before non-truncate SETATTR */
1711*4882a593Smuzhiyun if (is_wb && S_ISREG(inode->i_mode) &&
1712*4882a593Smuzhiyun attr->ia_valid &
1713*4882a593Smuzhiyun (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
1714*4882a593Smuzhiyun ATTR_TIMES_SET)) {
1715*4882a593Smuzhiyun err = write_inode_now(inode, true);
1716*4882a593Smuzhiyun if (err)
1717*4882a593Smuzhiyun return err;
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun fuse_set_nowrite(inode);
1720*4882a593Smuzhiyun fuse_release_nowrite(inode);
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun if (is_truncate) {
1724*4882a593Smuzhiyun fuse_set_nowrite(inode);
1725*4882a593Smuzhiyun set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1726*4882a593Smuzhiyun if (trust_local_cmtime && attr->ia_size != inode->i_size)
1727*4882a593Smuzhiyun attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1728*4882a593Smuzhiyun }
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun memset(&inarg, 0, sizeof(inarg));
1731*4882a593Smuzhiyun memset(&outarg, 0, sizeof(outarg));
1732*4882a593Smuzhiyun iattr_to_fattr(fc, attr, &inarg, trust_local_cmtime);
1733*4882a593Smuzhiyun if (file) {
1734*4882a593Smuzhiyun struct fuse_file *ff = file->private_data;
1735*4882a593Smuzhiyun inarg.valid |= FATTR_FH;
1736*4882a593Smuzhiyun inarg.fh = ff->fh;
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun if (attr->ia_valid & ATTR_SIZE) {
1739*4882a593Smuzhiyun /* For mandatory locking in truncate */
1740*4882a593Smuzhiyun inarg.valid |= FATTR_LOCKOWNER;
1741*4882a593Smuzhiyun inarg.lock_owner = fuse_lock_owner_id(fc, current->files);
1742*4882a593Smuzhiyun }
1743*4882a593Smuzhiyun fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
1744*4882a593Smuzhiyun err = fuse_simple_request(fm, &args);
1745*4882a593Smuzhiyun if (err) {
1746*4882a593Smuzhiyun if (err == -EINTR)
1747*4882a593Smuzhiyun fuse_invalidate_attr(inode);
1748*4882a593Smuzhiyun goto error;
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun if (fuse_invalid_attr(&outarg.attr) ||
1752*4882a593Smuzhiyun inode_wrong_type(inode, outarg.attr.mode)) {
1753*4882a593Smuzhiyun fuse_make_bad(inode);
1754*4882a593Smuzhiyun err = -EIO;
1755*4882a593Smuzhiyun goto error;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun spin_lock(&fi->lock);
1759*4882a593Smuzhiyun /* the kernel maintains i_mtime locally */
1760*4882a593Smuzhiyun if (trust_local_cmtime) {
1761*4882a593Smuzhiyun if (attr->ia_valid & ATTR_MTIME)
1762*4882a593Smuzhiyun inode->i_mtime = attr->ia_mtime;
1763*4882a593Smuzhiyun if (attr->ia_valid & ATTR_CTIME)
1764*4882a593Smuzhiyun inode->i_ctime = attr->ia_ctime;
1765*4882a593Smuzhiyun /* FIXME: clear I_DIRTY_SYNC? */
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun
1768*4882a593Smuzhiyun fuse_change_attributes_common(inode, &outarg.attr,
1769*4882a593Smuzhiyun attr_timeout(&outarg));
1770*4882a593Smuzhiyun oldsize = inode->i_size;
1771*4882a593Smuzhiyun /* see the comment in fuse_change_attributes() */
1772*4882a593Smuzhiyun if (!is_wb || is_truncate || !S_ISREG(inode->i_mode))
1773*4882a593Smuzhiyun i_size_write(inode, outarg.attr.size);
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun if (is_truncate) {
1776*4882a593Smuzhiyun /* NOTE: this may release/reacquire fi->lock */
1777*4882a593Smuzhiyun __fuse_release_nowrite(inode);
1778*4882a593Smuzhiyun }
1779*4882a593Smuzhiyun spin_unlock(&fi->lock);
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun /*
1782*4882a593Smuzhiyun * Only call invalidate_inode_pages2() after removing
1783*4882a593Smuzhiyun * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
1784*4882a593Smuzhiyun */
1785*4882a593Smuzhiyun if ((is_truncate || !is_wb) &&
1786*4882a593Smuzhiyun S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
1787*4882a593Smuzhiyun truncate_pagecache(inode, outarg.attr.size);
1788*4882a593Smuzhiyun invalidate_inode_pages2(inode->i_mapping);
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1792*4882a593Smuzhiyun out:
1793*4882a593Smuzhiyun if (fault_blocked)
1794*4882a593Smuzhiyun up_write(&fi->i_mmap_sem);
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun return 0;
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun error:
1799*4882a593Smuzhiyun if (is_truncate)
1800*4882a593Smuzhiyun fuse_release_nowrite(inode);
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun if (fault_blocked)
1805*4882a593Smuzhiyun up_write(&fi->i_mmap_sem);
1806*4882a593Smuzhiyun return err;
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
	struct inode *inode = d_inode(entry);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
	int ret;

	if (fuse_is_bad(inode))
		return -EIO;

	if (!fuse_allow_current_process(get_fuse_conn(inode)))
		return -EACCES;

	if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
		/* Don't let the VFS-computed ia_mode do the killing */
		attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
				    ATTR_MODE);

		/*
		 * The only sane way to reliably kill suid/sgid is to do it in
		 * the userspace filesystem
		 *
		 * This should be done on write(), truncate() and chown().
		 */
		if (!fc->handle_killpriv) {
			/*
			 * ia_mode calculation may have used stale i_mode.
			 * Refresh and recalculate.
			 */
			ret = fuse_do_getattr(inode, NULL, file);
			if (ret)
				return ret;

			/* Re-derive the mode-killing bits from fresh i_mode */
			attr->ia_mode = inode->i_mode;
			if (inode->i_mode & S_ISUID) {
				attr->ia_valid |= ATTR_MODE;
				attr->ia_mode &= ~S_ISUID;
			}
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
				attr->ia_valid |= ATTR_MODE;
				attr->ia_mode &= ~S_ISGID;
			}
		}
	}
	/* Everything may have been masked off above: nothing left to do */
	if (!attr->ia_valid)
		return 0;

	ret = fuse_do_setattr(entry, attr, file);
	if (!ret) {
		/*
		 * If filesystem supports acls it may have updated acl xattrs in
		 * the filesystem, so forget cached acls for the inode.
		 */
		if (fc->posix_acl)
			forget_all_cached_acls(inode);

		/* Directory mode changed, may need to revalidate access */
		if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
			fuse_invalidate_entry_cache(entry);
	}
	return ret;
}
1870*4882a593Smuzhiyun
/*
 * ->getattr() implementation.  For processes the connection does not
 * allow, an empty statx request still succeeds with st_dev only;
 * anything more is refused with -EACCES.
 */
static int fuse_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	if (fuse_allow_current_process(fc))
		return fuse_update_get_attr(inode, NULL, stat, request_mask,
					    flags);

	if (request_mask)
		return -EACCES;

	/*
	 * If user explicitly requested *nothing* then don't
	 * error out, but return st_dev only.
	 */
	stat->result_mask = 0;
	stat->dev = inode->i_sb->s_dev;
	return 0;
}
1895*4882a593Smuzhiyun
/* Inode operations for FUSE directories */
static const struct inode_operations fuse_dir_inode_operations = {
	.lookup		= fuse_lookup,
	.mkdir		= fuse_mkdir,
	.symlink	= fuse_symlink,
	.unlink		= fuse_unlink,
	.rmdir		= fuse_rmdir,
	.rename		= fuse_rename2,
	.link		= fuse_link,
	.setattr	= fuse_setattr,
	.create		= fuse_create,
	.atomic_open	= fuse_atomic_open,
	.mknod		= fuse_mknod,
	.permission	= fuse_permission,
	.getattr	= fuse_getattr,
	.listxattr	= fuse_listxattr,
	.get_acl	= fuse_get_acl,
	.set_acl	= fuse_set_acl,
};
1914*4882a593Smuzhiyun
/* File operations for open FUSE directories (readdir, fsync, ioctl) */
static const struct file_operations fuse_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= fuse_readdir,
	.open		= fuse_dir_open,
	.release	= fuse_dir_release,
	.fsync		= fuse_dir_fsync,
	.unlocked_ioctl	= fuse_dir_ioctl,
	.compat_ioctl	= fuse_dir_compat_ioctl,
};
1925*4882a593Smuzhiyun
/* Inode operations shared by regular files and special files */
static const struct inode_operations fuse_common_inode_operations = {
	.setattr	= fuse_setattr,
	.permission	= fuse_permission,
	.getattr	= fuse_getattr,
	.listxattr	= fuse_listxattr,
	.get_acl	= fuse_get_acl,
	.set_acl	= fuse_set_acl,
};
1934*4882a593Smuzhiyun
/* Inode operations for FUSE symlinks */
static const struct inode_operations fuse_symlink_inode_operations = {
	.setattr	= fuse_setattr,
	.get_link	= fuse_get_link,
	.getattr	= fuse_getattr,
	.listxattr	= fuse_listxattr,
};
1941*4882a593Smuzhiyun
/* Install the common inode operations on a newly set up inode. */
void fuse_init_common(struct inode *inode)
{
	inode->i_op = &fuse_common_inode_operations;
}
1946*4882a593Smuzhiyun
/* Set up a directory inode: install ops and reset the readdir cache state. */
void fuse_init_dir(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_op = &fuse_dir_inode_operations;
	inode->i_fop = &fuse_dir_operations;

	/* Fresh, empty readdir cache */
	spin_lock_init(&fi->rdc.lock);
	fi->rdc.cached = false;
	fi->rdc.size = 0;
	fi->rdc.pos = 0;
	fi->rdc.version = 0;
}
1960*4882a593Smuzhiyun
fuse_symlink_readpage(struct file * null,struct page * page)1961*4882a593Smuzhiyun static int fuse_symlink_readpage(struct file *null, struct page *page)
1962*4882a593Smuzhiyun {
1963*4882a593Smuzhiyun int err = fuse_readlink_page(page->mapping->host, page);
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun if (!err)
1966*4882a593Smuzhiyun SetPageUptodate(page);
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyun unlock_page(page);
1969*4882a593Smuzhiyun
1970*4882a593Smuzhiyun return err;
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun
/* Address-space operations for symlink inodes (used by page_get_link) */
static const struct address_space_operations fuse_symlink_aops = {
	.readpage	= fuse_symlink_readpage,
};
1976*4882a593Smuzhiyun
/* Set up a symlink inode: install ops and keep link pages out of highmem. */
void fuse_init_symlink(struct inode *inode)
{
	inode->i_op = &fuse_symlink_inode_operations;
	inode->i_data.a_ops = &fuse_symlink_aops;
	inode_nohighmem(inode);
}
1983