/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#ifndef _FS_FUSE_I_H
#define _FS_FUSE_I_H

#ifndef pr_fmt
# define pr_fmt(fmt) "fuse: " fmt
#endif

#include <linux/fuse.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/xattr.h>
#include <linux/pid_namespace.h>
#include <linux/refcount.h>
#include <linux/user_namespace.h>

/** Default max number of pages that can be used in a single read request */
#define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32

/** Maximum of max_pages received in init_out */
#define FUSE_MAX_MAX_PAGES 256

/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
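
/*
 * Illustrative sketch (not the in-tree code, names approximate): a new
 * writepage request is queued under fi->lock and only flushed while the
 * counter is non-negative; fuse_set_nowrite() adds the FUSE_NOWRITE bias,
 * fuse_release_nowrite() removes it and flushes the queue:
 *
 *	spin_lock(&fi->lock);
 *	list_add_tail(&req_entry, &fi->queued_writes);
 *	if (fi->writectr >= 0)			// no FUSE_NOWRITE bias active
 *		fuse_flush_writepages(inode);
 *	spin_unlock(&fi->lock);
 */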

/** It could be as large as PATH_MAX, but would that have any uses? */
#define FUSE_NAME_MAX 1024

/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5

/** List of active connections */
extern struct list_head fuse_conn_list;

/** Global mutex protecting fuse_conn_list and the control filesystem */
extern struct mutex fuse_mutex;

/** Module parameters */
extern unsigned max_user_bgreq;
extern unsigned max_user_congthresh;

/* One forget request */
struct fuse_forget_link {
	struct fuse_forget_one forget_one;
	struct fuse_forget_link *next;
};

/** FUSE inode */
struct fuse_inode {
	/** Inode data */
	struct inode inode;

	/** Unique ID, which identifies the inode between userspace
	 * and kernel */
	u64 nodeid;

	/** Number of lookups on this inode */
	u64 nlookup;

	/** The request used for sending the FORGET message */
	struct fuse_forget_link *forget;

	/** Time in jiffies until the file attributes are valid */
	u64 i_time;

	/* Which attributes are invalid */
	u32 inval_mask;

	/** The sticky bit in inode->i_mode may have been removed, so
	    preserve the original mode */
	umode_t orig_i_mode;

	/** 64 bit inode number */
	u64 orig_ino;

	/** Version of last attribute change */
	u64 attr_version;

	union {
		/* Write related fields (regular file only) */
		struct {
			/* Files usable in writepage.  Protected by fi->lock */
			struct list_head write_files;

			/* Writepages pending on truncate or fsync */
			struct list_head queued_writes;

			/* Number of sent writes, a negative bias
			 * (FUSE_NOWRITE) means more writes are blocked */
			int writectr;

			/* Waitq for writepage completion */
			wait_queue_head_t page_waitq;

			/* List of writepage requests (pending or sent) */
			struct rb_root writepages;
		};

		/* readdir cache (directory only) */
		struct {
			/* true if fully cached */
			bool cached;

			/* size of cache */
			loff_t size;

			/* position at end of cache (position of next entry) */
			loff_t pos;

			/* version of the cache */
			u64 version;

			/* modification time of directory when cache was
			 * started */
			struct timespec64 mtime;

			/* iversion of directory when cache was started */
			u64 iversion;

			/* protects above fields */
			spinlock_t lock;
		} rdc;
	};

	/** Miscellaneous bits describing inode state */
	unsigned long state;

	/** Lock for serializing lookup and readdir for back compatibility */
	struct mutex mutex;

	/** Lock to protect write related fields */
	spinlock_t lock;

	/**
	 * Can't take inode lock in fault path (leads to circular dependency).
	 * Introduce another semaphore which can be taken in fault path and
	 * then other filesystem paths can take this to block faults.
	 */
	struct rw_semaphore i_mmap_sem;
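
	/*
	 * Example (illustrative sketch, not a definitive rule): the DAX
	 * fault path takes this semaphore for reading, while paths that
	 * change the mapping (truncate, fallocate, dmap reclaim) take it
	 * for writing to fence off page faults:
	 *
	 *	down_read(&fi->i_mmap_sem);	// fault handler
	 *	...
	 *	down_write(&fi->i_mmap_sem);	// truncate-like path
	 */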

#ifdef CONFIG_FUSE_DAX
	/*
	 * Dax specific inode data
	 */
	struct fuse_inode_dax *dax;
#endif
};

/** FUSE inode state bits */
enum {
	/** Advise readdirplus */
	FUSE_I_ADVISE_RDPLUS,
	/** Initialized with readdirplus */
	FUSE_I_INIT_RDPLUS,
	/** An operation changing file size is in progress */
	FUSE_I_SIZE_UNSTABLE,
	/* Bad inode */
	FUSE_I_BAD,
};

struct fuse_conn;
struct fuse_mount;
struct fuse_release_args;

/**
 * Reference to lower filesystem file for read/write operations handled in
 * passthrough mode.
 * This struct also tracks the credentials to be used for handling read/write
 * operations.
 */
struct fuse_passthrough {
	struct file *filp;
	struct cred *cred;
};

/** FUSE specific file data */
struct fuse_file {
	/** Fuse connection for this file */
	struct fuse_mount *fm;

	/* Argument space reserved for release */
	struct fuse_release_args *release_args;

	/** Kernel file handle guaranteed to be unique */
	u64 kh;

	/** File handle used by userspace */
	u64 fh;

	/** Node id of this file */
	u64 nodeid;

	/** Refcount */
	refcount_t count;

	/** FOPEN_* flags returned by open */
	u32 open_flags;

	/** Entry on inode's write_files list */
	struct list_head write_entry;

	/* Readdir related */
	struct {
		/*
		 * Protects below fields against (crazy) parallel readdir on
		 * same open file.  Uncontended in the normal case.
		 */
		struct mutex lock;

		/* Dir stream position */
		loff_t pos;

		/* Offset in cache */
		loff_t cache_off;

		/* Version of cache we are reading */
		u64 version;

	} readdir;

	/** Container for data related to the passthrough functionality */
	struct fuse_passthrough passthrough;

	/** RB node to be linked on fuse_conn->polled_files */
	struct rb_node polled_node;

	/** Wait queue head for poll */
	wait_queue_head_t poll_wait;

	/** Has flock been performed on this file? */
	bool flock:1;
};

/** One input argument of a request */
struct fuse_in_arg {
	unsigned size;
	const void *value;
};

/** One output argument of a request */
struct fuse_arg {
	unsigned size;
	void *value;
};

/** FUSE page descriptor */
struct fuse_page_desc {
	unsigned int length;
	unsigned int offset;
};

struct fuse_args {
	uint64_t nodeid;
	uint32_t opcode;
	unsigned short in_numargs;
	unsigned short out_numargs;
	bool force:1;
	bool noreply:1;
	bool nocreds:1;
	bool in_pages:1;
	bool out_pages:1;
	bool user_pages:1;
	bool out_argvar:1;
	bool page_zeroing:1;
	bool page_replace:1;
	bool may_block:1;
	struct fuse_in_arg in_args[3];
	struct fuse_arg out_args[2];
	void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);

	/* Path used for completing d_canonical_path */
	struct path *canonical_path;
};

struct fuse_args_pages {
	struct fuse_args args;
	struct page **pages;
	struct fuse_page_desc *descs;
	unsigned int num_pages;
};

#define FUSE_ARGS(args) struct fuse_args args = {}
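
/*
 * Example (sketch of typical in-kernel usage; the GETATTR payload below is
 * only an illustration): a synchronous request is built on the stack with
 * FUSE_ARGS() and submitted with fuse_simple_request(), which allocates and
 * frees the underlying fuse_req:
 *
 *	FUSE_ARGS(args);
 *	struct fuse_getattr_in inarg = {};
 *	struct fuse_attr_out outarg;
 *
 *	args.opcode = FUSE_GETATTR;
 *	args.nodeid = get_node_id(inode);
 *	args.in_numargs = 1;
 *	args.in_args[0].size = sizeof(inarg);
 *	args.in_args[0].value = &inarg;
 *	args.out_numargs = 1;
 *	args.out_args[0].size = sizeof(outarg);
 *	args.out_args[0].value = &outarg;
 *	err = fuse_simple_request(fm, &args);
 */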

/** The request IO state (for asynchronous processing) */
struct fuse_io_priv {
	struct kref refcnt;
	int async;
	spinlock_t lock;
	unsigned reqs;
	ssize_t bytes;
	size_t size;
	__u64 offset;
	bool write;
	bool should_dirty;
	int err;
	struct kiocb *iocb;
	struct completion *done;
	bool blocking;
};

#define FUSE_IO_PRIV_SYNC(i) \
{					\
	.refcnt = KREF_INIT(1),		\
	.async = 0,			\
	.iocb = i,			\
}
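
/*
 * Example (sketch): a synchronous direct-IO caller initializes the IO state
 * on the stack and passes it to fuse_direct_io(), declared further below:
 *
 *	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
 *
 *	res = fuse_direct_io(&io, iter, &iocb->ki_pos, FUSE_DIO_WRITE);
 */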

/**
 * Request flags
 *
 * FR_ISREPLY:		set if the request has a reply
 * FR_FORCE:		force sending of the request even if interrupted
 * FR_BACKGROUND:	request is sent in the background
 * FR_WAITING:		request is counted as "waiting"
 * FR_ABORTED:		the request was aborted
 * FR_INTERRUPTED:	the request has been interrupted
 * FR_LOCKED:		data is being copied to/from the request
 * FR_PENDING:		request is not yet in userspace
 * FR_SENT:		request is in userspace, waiting for an answer
 * FR_FINISHED:		request is finished
 * FR_PRIVATE:		request is on private list
 * FR_ASYNC:		request is asynchronous
 */
enum fuse_req_flag {
	FR_ISREPLY,
	FR_FORCE,
	FR_BACKGROUND,
	FR_WAITING,
	FR_ABORTED,
	FR_INTERRUPTED,
	FR_LOCKED,
	FR_PENDING,
	FR_SENT,
	FR_FINISHED,
	FR_PRIVATE,
	FR_ASYNC,
};
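
/*
 * Example (sketch): req->flags is manipulated with the regular bit
 * operations, e.g.
 *
 *	__set_bit(FR_ISREPLY, &req->flags);
 *	if (test_bit(FR_BACKGROUND, &req->flags))
 *		...
 */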

/**
 * A request to the client
 *
 * .waitq.lock protects the following fields:
 *   - FR_ABORTED
 *   - FR_LOCKED (may also be modified under fc->lock, tested under both)
 */
struct fuse_req {
	/** This can be on either the pending, processing or io lists in
	    fuse_conn */
	struct list_head list;

	/** Entry on the interrupts list */
	struct list_head intr_entry;

	/* Input/output arguments */
	struct fuse_args *args;

	/** refcount */
	refcount_t count;

	/* Request flags, updated with test/set/clear_bit() */
	unsigned long flags;

	/* The request input header */
	struct {
		struct fuse_in_header h;
	} in;

	/* The request output header */
	struct {
		struct fuse_out_header h;
	} out;

	/** Used to wake up the task waiting for completion of request */
	wait_queue_head_t waitq;

	/** virtio-fs's physically contiguous buffer for in and out args */
	void *argbuf;

	/** fuse_mount this request belongs to */
	struct fuse_mount *fm;
};

struct fuse_iqueue;

/**
 * Input queue callbacks
 *
 * Input queue signalling is device-specific.  For example, the /dev/fuse file
 * uses fiq->waitq and fasync to wake processes that are waiting on queue
 * readiness.  These callbacks allow other device types to respond to input
 * queue activity.
 */
struct fuse_iqueue_ops {
	/**
	 * Signal that a forget has been queued
	 */
	void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq, bool sync)
		__releases(fiq->lock);

	/**
	 * Signal that an INTERRUPT request has been queued
	 */
	void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq, bool sync)
		__releases(fiq->lock);

	/**
	 * Signal that a request has been queued
	 */
	void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq, bool sync)
		__releases(fiq->lock);

	/**
	 * Clean up when fuse_iqueue is destroyed
	 */
	void (*release)(struct fuse_iqueue *fiq);
};
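
/*
 * Example (illustrative sketch; the names are hypothetical, but this mirrors
 * how a non-/dev/fuse transport such as virtiofs provides its own queue
 * signalling).  Each wake_* callback is entered with fiq->lock held and must
 * drop it before returning:
 *
 *	static const struct fuse_iqueue_ops example_fiq_ops = {
 *		.wake_forget_and_unlock		= example_wake_forget,
 *		.wake_interrupt_and_unlock	= example_wake_interrupt,
 *		.wake_pending_and_unlock	= example_wake_pending,
 *		.release			= example_release,
 *	};
 */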

/** /dev/fuse input queue operations */
extern const struct fuse_iqueue_ops fuse_dev_fiq_ops;

struct fuse_iqueue {
	/** Connection established */
	unsigned connected;

	/** Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/** Readers of the connection are waiting on this */
	wait_queue_head_t waitq;

	/** The next unique request id */
	u64 reqctr;

	/** The list of pending requests */
	struct list_head pending;

	/** Pending interrupts */
	struct list_head interrupts;

	/** Queue of pending forgets */
	struct fuse_forget_link forget_list_head;
	struct fuse_forget_link *forget_list_tail;

	/** Batching of FORGET requests (positive indicates FORGET batch) */
	int forget_batch;

	/** O_ASYNC requests */
	struct fasync_struct *fasync;

	/** Device-specific callbacks */
	const struct fuse_iqueue_ops *ops;

	/** Device-specific state */
	void *priv;
};

#define FUSE_PQ_HASH_BITS 8
#define FUSE_PQ_HASH_SIZE (1 << FUSE_PQ_HASH_BITS)

struct fuse_pqueue {
	/** Connection established */
	unsigned connected;

	/** Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/** Hash table of requests being processed */
	struct list_head *processing;

	/** The list of requests under I/O */
	struct list_head io;
};

/**
 * Fuse device instance
 */
struct fuse_dev {
	/** Fuse connection for this device */
	struct fuse_conn *fc;

	/** Processing queue */
	struct fuse_pqueue pq;

	/** list entry on fc->devices */
	struct list_head entry;
};

struct fuse_fs_context {
	int fd;
	unsigned int rootmode;
	kuid_t user_id;
	kgid_t group_id;
	bool is_bdev:1;
	bool fd_present:1;
	bool rootmode_present:1;
	bool user_id_present:1;
	bool group_id_present:1;
	bool default_permissions:1;
	bool allow_other:1;
	bool destroy:1;
	bool no_control:1;
	bool no_force_umount:1;
	bool legacy_opts_show:1;
	bool dax:1;
	unsigned int max_read;
	unsigned int blksize;
	const char *subtype;

	/* DAX device, may be NULL */
	struct dax_device *dax_dev;

	/* fuse_dev pointer to fill in, should contain NULL on entry */
	void **fudptr;
};

/**
 * A Fuse connection.
 *
 * This structure is created when the root filesystem is mounted, and
 * is destroyed when the client device is closed and the last
 * fuse_mount is destroyed.
 */
struct fuse_conn {
	/** Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/** Refcount */
	refcount_t count;

	/** Number of fuse_dev's */
	atomic_t dev_count;

	struct rcu_head rcu;

	/** The user id for this mount */
	kuid_t user_id;

	/** The group id for this mount */
	kgid_t group_id;

	/** The pid namespace for this mount */
	struct pid_namespace *pid_ns;

	/** The user namespace for this mount */
	struct user_namespace *user_ns;

	/** Maximum read size */
	unsigned max_read;

	/** Maximum write size */
	unsigned max_write;

	/** Maximum number of pages that can be used in a single request */
	unsigned int max_pages;

	/** Input queue */
	struct fuse_iqueue iq;

	/** The next unique kernel file handle */
	atomic64_t khctr;

	/** rbtree of fuse_files waiting for poll events indexed by ph */
	struct rb_root polled_files;

	/** Maximum number of outstanding background requests */
	unsigned max_background;

	/** Number of background requests at which congestion starts */
	unsigned congestion_threshold;

	/** Number of requests currently in the background */
	unsigned num_background;

	/** Number of background requests currently queued for userspace */
	unsigned active_background;

	/** The list of background requests set aside for later queuing */
	struct list_head bg_queue;

	/** Protects: max_background, congestion_threshold, num_background,
	 * active_background, bg_queue, blocked */
	spinlock_t bg_lock;

	/** Flag indicating that INIT reply has been received.  Allocation of
	 * any fuse request will be suspended until the flag is set */
	int initialized;

	/** Flag indicating if connection is blocked.  This will be
	    the case before the INIT reply is received, and if there
	    are too many outstanding background requests */
	int blocked;

	/** waitq for blocked connection */
	wait_queue_head_t blocked_waitq;

	/** Connection established, cleared on umount, connection
	    abort and device release */
	unsigned connected;

	/** Connection aborted via sysfs */
	bool aborted;

	/** Connection failed (version mismatch).  Cannot race with
	    setting other bitfields since it is only set once in INIT
	    reply, before any other request, and never cleared */
	unsigned conn_error:1;

	/** Connection successful.  Only set in INIT */
	unsigned conn_init:1;

	/** Do readpages asynchronously?  Only set in INIT */
	unsigned async_read:1;

	/** Return a unique read error after abort.  Only set in INIT */
	unsigned abort_err:1;

	/** Do not send separate SETATTR request before open(O_TRUNC) */
	unsigned atomic_o_trunc:1;

	/** Filesystem supports NFS exporting.  Only set in INIT */
	unsigned export_support:1;

	/** write-back cache policy (default is write-through) */
	unsigned writeback_cache:1;

	/** allow parallel lookups and readdir (default is serialized) */
	unsigned parallel_dirops:1;

	/** filesystem handles killing suid/sgid/cap on write/chown/trunc */
	unsigned handle_killpriv:1;

	/** cache READLINK responses in page cache */
	unsigned cache_symlinks:1;

	/* show legacy mount options */
	unsigned int legacy_opts_show:1;

	/*
	 * The following bitfields are only for optimization purposes
	 * and hence races in setting them will not cause malfunction
	 */

	/** Is open/release not implemented by fs? */
	unsigned no_open:1;

	/** Is opendir/releasedir not implemented by fs? */
	unsigned no_opendir:1;

	/** Is fsync not implemented by fs? */
	unsigned no_fsync:1;

	/** Is fsyncdir not implemented by fs? */
	unsigned no_fsyncdir:1;

	/** Is flush not implemented by fs? */
	unsigned no_flush:1;

	/** Is setxattr not implemented by fs? */
	unsigned no_setxattr:1;

	/** Is getxattr not implemented by fs? */
	unsigned no_getxattr:1;

	/** Is listxattr not implemented by fs? */
	unsigned no_listxattr:1;

	/** Is removexattr not implemented by fs? */
	unsigned no_removexattr:1;

	/** Are posix file locking primitives not implemented by fs? */
	unsigned no_lock:1;

	/** Is access not implemented by fs? */
	unsigned no_access:1;

	/** Is create not implemented by fs? */
	unsigned no_create:1;

	/** Is interrupt not implemented by fs? */
	unsigned no_interrupt:1;

	/** Is bmap not implemented by fs? */
	unsigned no_bmap:1;

	/** Is poll not implemented by fs? */
	unsigned no_poll:1;

	/** Do multi-page cached writes */
	unsigned big_writes:1;

	/** Don't apply umask to creation modes */
	unsigned dont_mask:1;

	/** Are BSD file locking primitives not implemented by fs? */
	unsigned no_flock:1;

	/** Is fallocate not implemented by fs? */
	unsigned no_fallocate:1;

	/** Is rename with flags not implemented by fs? */
	unsigned no_rename2:1;

	/** Use enhanced/automatic page cache invalidation. */
	unsigned auto_inval_data:1;

	/** Filesystem is fully responsible for page cache invalidation. */
	unsigned explicit_inval_data:1;

	/** Does the filesystem support readdirplus? */
	unsigned do_readdirplus:1;

	/** Does the filesystem want adaptive readdirplus? */
	unsigned readdirplus_auto:1;

	/** Does the filesystem support asynchronous direct-IO submission? */
	unsigned async_dio:1;

	/** Is lseek not implemented by fs? */
	unsigned no_lseek:1;

	/** Does the filesystem support posix acls? */
	unsigned posix_acl:1;

	/** Check permissions based on the file mode or not? */
	unsigned default_permissions:1;

	/** Allow users other than the mounter to access the filesystem? */
	unsigned allow_other:1;

	/** Is copy_file_range not implemented by fs? */
	unsigned no_copy_file_range:1;

	/* Send DESTROY request */
	unsigned int destroy:1;

	/* Delete dentries that have gone stale */
	unsigned int delete_stale:1;

	/** Do not create entry in fusectl fs */
	unsigned int no_control:1;

	/** Do not allow MNT_FORCE umount */
	unsigned int no_force_umount:1;

	/* Auto-mount submounts announced by the server */
	unsigned int auto_submounts:1;

	/** Passthrough mode for read/write IO */
	unsigned int passthrough:1;

	/** The number of requests waiting for completion */
	atomic_t num_waiting;

	/** Negotiated minor version */
	unsigned minor;

	/** Entry on the fuse_mount_list */
	struct list_head entry;

	/** Device ID from the root super block */
	dev_t dev;

	/** Dentries in the control filesystem */
	struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES];

	/** number of dentries used in the above array */
	int ctl_ndents;

	/** Key for lock owner ID scrambling */
	u32 scramble_key[4];

	/** Version counter for attribute changes */
	atomic64_t attr_version;

	/** Called on final put */
	void (*release)(struct fuse_conn *);

	/**
	 * Read/write semaphore to hold when accessing the sb of any
	 * fuse_mount belonging to this connection
	 */
	struct rw_semaphore killsb;

	/** List of device instances belonging to this connection */
	struct list_head devices;

#ifdef CONFIG_FUSE_DAX
	/* Dax specific conn data, non-NULL if DAX is enabled */
	struct fuse_conn_dax *dax;
#endif

	/** List of filesystems using this connection */
	struct list_head mounts;

	/** IDR for passthrough requests */
	struct idr passthrough_req;

	/** Protects passthrough_req */
	spinlock_t passthrough_req_lock;
};

/*
 * Represents a mounted filesystem, potentially a submount.
 *
 * This object allows sharing a fuse_conn between separate mounts to
 * allow submounts with dedicated superblocks and thus separate device
 * IDs.
 */
struct fuse_mount {
	/* Underlying (potentially shared) connection to the FUSE server */
	struct fuse_conn *fc;

	/* Refcount */
	refcount_t count;

	/*
	 * Super block for this connection (fc->killsb must be held when
	 * accessing this).
	 */
	struct super_block *sb;

	/* Entry on fc->mounts */
	struct list_head fc_entry;
};

static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);

	return fm ? fm->fc : NULL;
}

static inline struct fuse_mount *get_fuse_mount(struct inode *inode)
{
	return get_fuse_mount_super(inode->i_sb);
}

static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
{
	struct fuse_mount *fm = get_fuse_mount(inode);

	return fm ? fm->fc : NULL;
}

static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
{
	return container_of(inode, struct fuse_inode, inode);
}

static inline u64 get_node_id(struct inode *inode)
{
	return get_fuse_inode(inode)->nodeid;
}

static inline int invalid_nodeid(u64 nodeid)
{
	return !nodeid || nodeid == FUSE_ROOT_ID;
}

static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
{
	return atomic64_read(&fc->attr_version);
}

static inline bool fuse_stale_inode(const struct inode *inode, int generation,
				    struct fuse_attr *attr)
{
	return inode->i_generation != generation ||
		inode_wrong_type(inode, attr->mode);
}

static inline void fuse_make_bad(struct inode *inode)
{
	remove_inode_hash(inode);
	set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}

static inline bool fuse_is_bad(struct inode *inode)
{
	return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state));
}

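/*
 * Example (sketch): most operations start by mapping the VFS objects to the
 * FUSE ones with the helpers above and bailing out early on a bad inode:
 *
 *	struct fuse_mount *fm = get_fuse_mount(inode);
 *
 *	if (fuse_is_bad(inode))
 *		return -EIO;
 */
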
/** Device operations */
extern const struct file_operations fuse_dev_operations;

extern const struct dentry_operations fuse_dentry_operations;
extern const struct dentry_operations fuse_root_dentry_operations;

/**
 * Get a filled in inode
 */
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version);

int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
		     struct fuse_entry_out *outarg, struct inode **inode);

/**
 * Send FORGET command
 */
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup);

struct fuse_forget_link *fuse_alloc_forget(void);

struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
					     unsigned int max,
					     unsigned int *countp);

/*
 * Initialize READ or READDIR request
 */
struct fuse_io_args {
	union {
		struct {
			struct fuse_read_in in;
			u64 attr_ver;
		} read;
		struct {
			struct fuse_write_in in;
			struct fuse_write_out out;
			bool page_locked;
		} write;
	};
	struct fuse_args_pages ap;
	struct fuse_io_priv *io;
	struct fuse_file *ff;
};

void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode);

/**
 * Send OPEN or OPENDIR request
 */
int fuse_open_common(struct inode *inode, struct file *file, bool isdir);

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm);
void fuse_file_free(struct fuse_file *ff);
void fuse_finish_open(struct inode *inode, struct file *file);

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags);

/**
 * Send RELEASE or RELEASEDIR request
 */
void fuse_release_common(struct file *file, bool isdir);

/**
 * Send FSYNC or FSYNCDIR request
 */
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode);

/**
 * Notify poll wakeup
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg);

/**
 * Initialize file operations on a regular file
 */
void fuse_init_file_inode(struct inode *inode);

/**
 * Initialize inode operations on regular files and special files
 */
void fuse_init_common(struct inode *inode);

/**
 * Initialize inode and file operations on a directory
 */
void fuse_init_dir(struct inode *inode);

/**
 * Initialize inode operations on a symlink
 */
void fuse_init_symlink(struct inode *inode);

/**
 * Change attributes of an inode
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version);

void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid);

/**
 * Initialize the client device
 */
int fuse_dev_init(void);

/**
 * Cleanup the client device
 */
void fuse_dev_cleanup(void);

int fuse_ctl_init(void);
void __exit fuse_ctl_cleanup(void);

/**
 * Simple request sending that does request allocation and freeing
 */
ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args);
int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
			   gfp_t gfp_flags);
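
/*
 * Example (sketch): background submission pairs fuse_simple_background() with
 * a completion callback stored in args->end; on a submission error the caller
 * typically invokes the callback itself (example_end is hypothetical):
 *
 *	ap->args.end = example_end;
 *	err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
 *	if (err)
 *		example_end(fm, &ap->args, err);
 */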

/**
 * End a finished request
 */
void fuse_request_end(struct fuse_req *req);

/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);

/**
 * Invalidate inode attributes
 */
void fuse_invalidate_attr(struct inode *inode);

void fuse_invalidate_entry_cache(struct dentry *entry);

void fuse_invalidate_atime(struct inode *inode);

u64 entry_attr_timeout(struct fuse_entry_out *o);
void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);

/**
 * Acquire reference to fuse_conn
 */
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);

/**
 * Initialize fuse_conn
 */
void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
		    struct user_namespace *user_ns,
		    const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv);

/**
 * Release reference to fuse_conn
 */
void fuse_conn_put(struct fuse_conn *fc);

/**
 * Acquire reference to fuse_mount
 */
struct fuse_mount *fuse_mount_get(struct fuse_mount *fm);

/**
 * Release reference to fuse_mount
 */
void fuse_mount_put(struct fuse_mount *fm);

struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc);
struct fuse_dev *fuse_dev_alloc(void);
void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc);
void fuse_dev_free(struct fuse_dev *fud);
void fuse_send_init(struct fuse_mount *fm);

/**
 * Fill in superblock and initialize fuse connection
 * @sb: partially-initialized superblock to fill in
 * @ctx: mount context
 */
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx);

/*
 * Fill in superblock for submounts
 * @sb: partially-initialized superblock to fill in
 * @parent_fi: The fuse_inode of the parent filesystem where this submount is
 *	       mounted
 */
int fuse_fill_super_submount(struct super_block *sb,
			     struct fuse_inode *parent_fi);

/*
 * Remove the mount from the connection
 *
 * Returns whether this was the last mount
 */
bool fuse_mount_remove(struct fuse_mount *fm);

/*
 * Shut down the connection (possibly sending DESTROY request).
 */
void fuse_conn_destroy(struct fuse_mount *fm);

/**
 * Add connection to control filesystem
 */
int fuse_ctl_add_conn(struct fuse_conn *fc);

/**
 * Remove connection from control filesystem
 */
void fuse_ctl_remove_conn(struct fuse_conn *fc);

/**
 * Is file type valid?
 */
int fuse_valid_type(int m);

bool fuse_invalid_attr(struct fuse_attr *attr);

/**
 * Is current process allowed to perform filesystem operation?
 */
int fuse_allow_current_process(struct fuse_conn *fc);

u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id);

void fuse_flush_time_update(struct inode *inode);
void fuse_update_ctime(struct inode *inode);

int fuse_update_attributes(struct inode *inode, struct file *file);

void fuse_flush_writepages(struct inode *inode);

void fuse_set_nowrite(struct inode *inode);
void fuse_release_nowrite(struct inode *inode);

/**
 * Scan all fuse_mounts belonging to fc to find the first where
 * ilookup5() returns a result.  Return that result and the
 * respective fuse_mount in *fm (unless fm is NULL).
 *
 * The caller must hold fc->killsb.
 */
struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_mount **fm);
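
/*
 * Example (sketch): reverse-invalidation handlers typically take fc->killsb
 * for reading around the lookup, which keeps the superblocks of this
 * connection's mounts from going away:
 *
 *	down_read(&fc->killsb);
 *	inode = fuse_ilookup(fc, nodeid, NULL);
 *	if (inode) {
 *		...
 *		iput(inode);
 *	}
 *	up_read(&fc->killsb);
 */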

/**
 * File-system tells the kernel to invalidate cache for the given node id.
 */
int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
			     loff_t offset, loff_t len);

/**
 * File-system tells the kernel to invalidate parent attributes and
 * the dentry matching parent/name.
 *
 * If the child_nodeid is non-zero and:
 *    - matches the inode number for the dentry matching parent/name,
 *    - is not a mount point
 *    - is a file or an empty directory
 * then the dentry is unhashed (d_delete()).
 */
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name);

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir);

/**
 * fuse_direct_io() flags
 */

/** If set, it is WRITE; otherwise - READ */
#define FUSE_DIO_WRITE (1 << 0)

/** CUSE passes fuse_direct_io() a file whose f_mapping->host is not from FUSE */
#define FUSE_DIO_CUSE  (1 << 1)

ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags);
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags);
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags);
__poll_t fuse_file_poll(struct file *file, poll_table *wait);
int fuse_dev_release(struct inode *inode, struct file *file);

bool fuse_write_update_size(struct inode *inode, loff_t pos);

int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);

int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
		    struct file *file);

void fuse_set_initialized(struct fuse_conn *fc);

void fuse_unlock_inode(struct inode *inode, bool locked);
bool fuse_lock_inode(struct inode *inode);

int fuse_setxattr(struct inode *inode, const char *name, const void *value,
		  size_t size, int flags);
ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
		      size_t size);
ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
int fuse_removexattr(struct inode *inode, const char *name);
extern const struct xattr_handler *fuse_xattr_handlers[];
extern const struct xattr_handler *fuse_acl_xattr_handlers[];
extern const struct xattr_handler *fuse_no_acl_xattr_handlers[];

struct posix_acl;
struct posix_acl *fuse_get_acl(struct inode *inode, int type);
int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);


/* readdir.c */
int fuse_readdir(struct file *file, struct dir_context *ctx);

/**
 * Return the number of bytes in an arguments list
 */
unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args);

/**
 * Get the next unique ID for a request
 */
u64 fuse_get_unique(struct fuse_iqueue *fiq);
void fuse_free_conn(struct fuse_conn *fc);

/* dax.c */

#define FUSE_IS_DAX(inode) (IS_ENABLED(CONFIG_FUSE_DAX) && IS_DAX(inode))
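
/*
 * Example (sketch): read/write paths gate the DAX variants on the inode, e.g.
 *
 *	if (FUSE_IS_DAX(inode))
 *		return fuse_dax_read_iter(iocb, to);
 */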

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to);
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from);
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end);
int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev);
void fuse_dax_conn_free(struct fuse_conn *fc);
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi);
void fuse_dax_inode_init(struct inode *inode);
void fuse_dax_inode_cleanup(struct inode *inode);
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);
void fuse_dax_cancel_work(struct fuse_conn *fc);

/* passthrough.c */
int fuse_passthrough_open(struct fuse_dev *fud, u32 lower_fd);
int fuse_passthrough_setup(struct fuse_conn *fc, struct fuse_file *ff,
			   struct fuse_open_out *openarg);
void fuse_passthrough_release(struct fuse_passthrough *passthrough);
ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma);
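
/*
 * Example (sketch, modelled on how the passthrough patches wire this up in
 * fs/fuse/file.c): the regular read/write paths dispatch to the passthrough
 * helpers when a lower file has been attached to the fuse_file:
 *
 *	if (ff->passthrough.filp)
 *		return fuse_passthrough_read_iter(iocb, to);
 */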

#endif /* _FS_FUSE_I_H */