// SPDX-License-Identifier: GPL-2.0
/*
 * dax: direct host memory access
 * Copyright (C) 2020 Red Hat, Inc.
 */

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pfn_t.h>
#include <linux/iomap.h>
#include <linux/interval_tree.h>

/*
 * Default memory range size.  A power of 2 so it agrees with common FUSE_INIT
 * map_alignment values 4KB and 64KB.
 */
#define FUSE_DAX_SHIFT	21
#define FUSE_DAX_SZ	(1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES	(FUSE_DAX_SZ / PAGE_SIZE)
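
/*
 * Illustrative numbers (not in the original source): with FUSE_DAX_SHIFT = 21
 * each range covers 1 << 21 bytes = 2 MiB, i.e. FUSE_DAX_PAGES = 512 on a
 * system with 4 KiB pages. A hypothetical 1 GiB DAX window would thus be
 * split into 512 such ranges.
 */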

/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK		(10)

/*
 * Dax memory reclaim threshold in percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 * Default is 20%.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD	(20)
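
/*
 * Worked example (illustrative only): with 512 total ranges and the default
 * 20% threshold, __kick_dmap_free_worker() below queues the reclaim worker
 * once nr_free_ranges drops under 512 * 20 / 100 = 102.
 */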

/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {
	/* Pointer to inode where this memory range is mapped */
	struct inode *inode;

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

	/* For interval tree in file/inode */
	struct interval_tree_node itn;

	/* Will connect in fc->busy_ranges to keep track of busy memory */
	struct list_head busy_list;

	/** Position in DAX window */
	u64 window_offset;

	/** Length of mapping, in bytes */
	loff_t length;

	/* Is this mapping read-only or read-write */
	bool writable;

	/* reference count when the mapping is used by dax iomap. */
	refcount_t refcnt;
};
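
/*
 * Note on refcnt (inferred from the uses below, not stated explicitly in the
 * original comments): an idle range sits at refcount 1; fuse_fill_iomap()
 * takes an extra reference while the dax iomap code uses the range, so
 * refcount_read() > 1 means "in use" and the reclaim paths skip such ranges.
 */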

/* Per-inode dax map */
struct fuse_inode_dax {
	/* Semaphore to protect modifications to the dmap tree */
	struct rw_semaphore sem;

	/* Sorted rb tree of struct fuse_dax_mapping elements */
	struct rb_root_cached tree;
	unsigned long nr;
};

struct fuse_conn_dax {
	/* DAX device */
	struct dax_device *dev;

	/* Lock protecting accesses to members of this structure */
	spinlock_t lock;

	/* List of memory ranges which are busy */
	unsigned long nr_busy_ranges;
	struct list_head busy_ranges;

	/* Worker to free up memory ranges */
	struct delayed_work free_work;

	/* Wait queue for a dax range to become free */
	wait_queue_head_t range_waitq;

	/* DAX Window Free Ranges */
	long nr_free_ranges;
	struct list_head free_ranges;

	unsigned long nr_ranges;
};

static inline struct fuse_dax_mapping *
node_to_dmap(struct interval_tree_node *node)
{
	if (!node)
		return NULL;

	return container_of(node, struct fuse_dax_mapping, itn);
}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);

static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{
	unsigned long free_threshold;

	/* If the number of free ranges is below the threshold, start reclaim */
	free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
			     1);
	if (fcd->nr_free_ranges < free_threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
}

static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
				  unsigned long delay_ms)
{
	spin_lock(&fcd->lock);
	__kick_dmap_free_worker(fcd, delay_ms);
	spin_unlock(&fcd->lock);
}

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
	struct fuse_dax_mapping *dmap;

	spin_lock(&fcd->lock);
	dmap = list_first_entry_or_null(&fcd->free_ranges,
					struct fuse_dax_mapping, list);
	if (dmap) {
		list_del_init(&dmap->list);
		WARN_ON(fcd->nr_free_ranges <= 0);
		fcd->nr_free_ranges--;
	}
	spin_unlock(&fcd->lock);

	kick_dmap_free_worker(fcd, 0);
	return dmap;
}

/* This assumes fcd->lock is held */
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_del_init(&dmap->busy_list);
	WARN_ON(fcd->nr_busy_ranges == 0);
	fcd->nr_busy_ranges--;
}

static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	spin_lock(&fcd->lock);
	__dmap_remove_busy_list(fcd, dmap);
	spin_unlock(&fcd->lock);
}

/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				struct fuse_dax_mapping *dmap)
{
	list_add_tail(&dmap->list, &fcd->free_ranges);
	fcd->nr_free_ranges++;
	wake_up(&fcd->range_waitq);
}

static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				struct fuse_dax_mapping *dmap)
{
	/* Return fuse_dax_mapping to free list */
	spin_lock(&fcd->lock);
	__dmap_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
}

static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
				  struct fuse_dax_mapping *dmap, bool writable,
				  bool upgrade)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn_dax *fcd = fm->fc->dax;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_setupmapping_in inarg;
	loff_t offset = start_idx << FUSE_DAX_SHIFT;
	FUSE_ARGS(args);
	ssize_t err;

	WARN_ON(fcd->nr_free_ranges < 0);

	/* Ask fuse daemon to setup mapping */
	memset(&inarg, 0, sizeof(inarg));
	inarg.foffset = offset;
	inarg.fh = -1;
	inarg.moffset = dmap->window_offset;
	inarg.len = FUSE_DAX_SZ;
	inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ;
	if (writable)
		inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE;
	args.opcode = FUSE_SETUPMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err < 0)
		return err;
	dmap->writable = writable;
	if (!upgrade) {
		/*
		 * We don't take a reference on inode. inode is valid right now
		 * and when inode is going away, cleanup logic should first
		 * cleanup dmap entries.
		 */
		dmap->inode = inode;
		dmap->itn.start = dmap->itn.last = start_idx;
		/* Protected by fi->dax->sem */
		interval_tree_insert(&dmap->itn, &fi->dax->tree);
		fi->dax->nr++;
		spin_lock(&fcd->lock);
		list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
		fcd->nr_busy_ranges++;
		spin_unlock(&fcd->lock);
	}
	return 0;
}

static int fuse_send_removemapping(struct inode *inode,
				   struct fuse_removemapping_in *inargp,
				   struct fuse_removemapping_one *remove_one)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);

	args.opcode = FUSE_REMOVEMAPPING;
	args.nodeid = fi->nodeid;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(*inargp);
	args.in_args[0].value = inargp;
	args.in_args[1].size = inargp->count * sizeof(*remove_one);
	args.in_args[1].value = remove_one;
	return fuse_simple_request(fm, &args);
}

static int dmap_removemapping_list(struct inode *inode, unsigned int num,
				   struct list_head *to_remove)
{
	struct fuse_removemapping_one *remove_one, *ptr;
	struct fuse_removemapping_in inarg;
	struct fuse_dax_mapping *dmap;
	int ret, i = 0, nr_alloc;

	nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY);
	remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS);
	if (!remove_one)
		return -ENOMEM;

	ptr = remove_one;
	list_for_each_entry(dmap, to_remove, list) {
		ptr->moffset = dmap->window_offset;
		ptr->len = dmap->length;
		ptr++;
		i++;
		num--;
		if (i >= nr_alloc || num == 0) {
			memset(&inarg, 0, sizeof(inarg));
			inarg.count = i;
			ret = fuse_send_removemapping(inode, &inarg,
						      remove_one);
			if (ret)
				goto out;
			ptr = remove_one;
			i = 0;
		}
	}
out:
	kfree(remove_one);
	return ret;
}
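
/*
 * Batching sketch (illustrative, the real cap comes from
 * FUSE_REMOVEMAPPING_MAX_ENTRY in the uapi header): if num = 1000 entries
 * are queued for removal and the cap were, say, 256 per message, the loop
 * above would emit three full FUSE_REMOVEMAPPING requests of 256 entries
 * followed by a final one of 232.
 */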

/*
 * Cleanup dmap entry and add back to free list. This should be called with
 * fcd->lock held.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
					    struct fuse_dax_mapping *dmap)
{
	pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n",
		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
		 dmap->length);
	__dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;
	__dmap_add_to_free_pool(fcd, dmap);
}

/*
 * Free inode dmap entries whose range falls inside [start, end].
 * Does not take any locks. At this point in time it should only be
 * called from the evict_inode() path where we know all dmap entries can be
 * reclaimed.
 */
static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
				     struct inode *inode,
				     loff_t start, loff_t end)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap, *n;
	int err, num = 0;
	LIST_HEAD(to_remove);
	unsigned long start_idx = start >> FUSE_DAX_SHIFT;
	unsigned long end_idx = end >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	while (1) {
		node = interval_tree_iter_first(&fi->dax->tree, start_idx,
						end_idx);
		if (!node)
			break;
		dmap = node_to_dmap(node);
		/* inode is going away. There should not be any users of dmap */
		WARN_ON(refcount_read(&dmap->refcnt) > 1);
		interval_tree_remove(&dmap->itn, &fi->dax->tree);
		num++;
		list_add(&dmap->list, &to_remove);
	}

	/* Nothing to remove */
	if (list_empty(&to_remove))
		return;

	WARN_ON(fi->dax->nr < num);
	fi->dax->nr -= num;
	err = dmap_removemapping_list(inode, num, &to_remove);
	if (err && err != -ENOTCONN) {
		pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n",
			start, end);
	}
	spin_lock(&fcd->lock);
	list_for_each_entry_safe(dmap, n, &to_remove, list) {
		list_del_init(&dmap->list);
		dmap_reinit_add_to_free_pool(fcd, dmap);
	}
	spin_unlock(&fcd->lock);
}

static int dmap_removemapping_one(struct inode *inode,
				  struct fuse_dax_mapping *dmap)
{
	struct fuse_removemapping_one forget_one;
	struct fuse_removemapping_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.count = 1;
	memset(&forget_one, 0, sizeof(forget_one));
	forget_one.moffset = dmap->window_offset;
	forget_one.len = dmap->length;

	return fuse_send_removemapping(inode, &inarg, &forget_one);
}

/*
 * It is called from evict_inode() and by that time inode is going away. So
 * this function does not take any locks like fi->dax->sem for traversing
 * that fuse inode interval tree. If that lock is taken then lock validator
 * complains of a deadlock situation w.r.t. the fs_reclaim lock.
 */
void fuse_dax_inode_cleanup(struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * fuse_evict_inode() has already called truncate_inode_pages_final()
	 * before we arrive here. So we should not have to worry about any
	 * pages/exception entries still associated with inode.
	 */
	inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
	WARN_ON(fi->dax->nr);
}

static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
}

static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
			    struct iomap *iomap, struct fuse_dax_mapping *dmap,
			    unsigned int flags)
{
	loff_t offset, len;
	loff_t i_size = i_size_read(inode);

	offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
	len = min(length, dmap->length - offset);

	/* If length is beyond end of file, truncate further */
	if (pos + len > i_size)
		len = i_size - pos;

	if (len > 0) {
		iomap->addr = dmap->window_offset + offset;
		iomap->length = len;
		if (flags & IOMAP_FAULT)
			iomap->length = ALIGN(len, PAGE_SIZE);
		iomap->type = IOMAP_MAPPED;
		/*
		 * increase refcnt so that reclaim code knows this dmap is in
		 * use. This assumes fi->dax->sem mutex is held either
		 * shared/exclusive.
		 */
		refcount_inc(&dmap->refcnt);

		/* iomap->private should be NULL */
		WARN_ON_ONCE(iomap->private);
		iomap->private = dmap;
	} else {
		/* Mapping beyond end of file is hole */
		fuse_fill_iomap_hole(iomap, length);
	}
}
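
/*
 * Worked example for the offset math above (numbers are illustrative):
 * with 2 MiB ranges, a read at pos = 5 MiB hits the range whose
 * itn.start = 2 (covering 4 MiB..6 MiB), so offset = 5 MiB - 4 MiB = 1 MiB
 * and iomap->addr = dmap->window_offset + 1 MiB, with len capped to the
 * bytes left in the range and to i_size.
 */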

static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
	int ret;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Can't do inline reclaim in fault path. We call
	 * dax_layout_busy_page() before we free a range. And
	 * fuse_wait_dax_page() drops fi->i_mmap_sem lock and requires it.
	 * In fault path we enter with fi->i_mmap_sem held and can't drop
	 * it. Also in fault path we hold fi->i_mmap_sem shared and not
	 * exclusive, so that creates further issues with fuse_wait_dax_page().
	 * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
	 * range to become free and retry.
	 */
	if (flags & IOMAP_FAULT) {
		alloc_dmap = alloc_dax_mapping(fcd);
		if (!alloc_dmap)
			return -EAGAIN;
	} else {
		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
		if (IS_ERR(alloc_dmap))
			return PTR_ERR(alloc_dmap);
	}

	/* If we are here, we should have memory allocated */
	if (WARN_ON(!alloc_dmap))
		return -EIO;

	/*
	 * Take write lock so that only one caller can try to setup mapping
	 * and others wait.
	 */
	down_write(&fi->dax->sem);
	/*
	 * We dropped lock. Check again if somebody else set up the
	 * mapping already.
	 */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return 0;
	}

	/* Setup one mapping */
	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap,
				     writable, false);
	if (ret < 0) {
		dmap_add_to_free_pool(fcd, alloc_dmap);
		up_write(&fi->dax->sem);
		return ret;
	}
	fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags);
	up_write(&fi->dax->sem);
	return 0;
}
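
/*
 * Note: the allocate-then-recheck sequence above is an optimistic scheme.
 * The range is allocated before taking fi->dax->sem, the interval tree is
 * re-checked under the write lock, and the pre-allocated range goes back
 * to the free pool if another thread set the mapping up first.
 */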

static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
				    loff_t length, unsigned int flags,
				    struct iomap *iomap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	int ret;
	unsigned long idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/*
	 * Take exclusive lock so that only one caller can try to setup
	 * mapping and others wait.
	 */
	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);

	/* We are holding either inode lock or i_mmap_sem, and that should
	 * ensure that dmap can't be truncated. We are holding a reference
	 * on dmap and that should make sure it can't be reclaimed. So dmap
	 * should still be there in tree despite the fact we dropped and
	 * re-acquired the fi->dax->sem lock.
	 */
	ret = -EIO;
	if (WARN_ON(!node))
		goto out_err;

	dmap = node_to_dmap(node);

	/* We took an extra reference on dmap to make sure it's not reclaimed.
	 * Now we hold fi->dax->sem lock and that reference is not needed
	 * anymore. Drop it.
	 */
	if (refcount_dec_and_test(&dmap->refcnt)) {
		/* refcount should not hit 0. This object only goes
		 * away when fuse connection goes away
		 */
		WARN_ON_ONCE(1);
	}

	/* Maybe another thread already upgraded mapping while we were not
	 * holding lock.
	 */
	if (dmap->writable) {
		ret = 0;
		goto out_fill_iomap;
	}

	ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
				     true);
	if (ret < 0)
		goto out_err;
out_fill_iomap:
	fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
out_err:
	up_write(&fi->dax->sem);
	return ret;
}

/* This is just for DAX and the mapping is ephemeral, do not use it for other
 * purposes since there is no block device with a permanent mapping.
 */
static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_dax_mapping *dmap;
	bool writable = flags & IOMAP_WRITE;
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

	/* We don't support FIEMAP */
	if (WARN_ON(flags & IOMAP_REPORT))
		return -EIO;

	iomap->offset = pos;
	iomap->flags = 0;
	iomap->bdev = NULL;
	iomap->dax_dev = fc->dax->dev;

	/*
	 * Both read/write and mmap path can race here. So we need something
	 * to make sure if we are setting up mapping, then other path waits
	 *
	 * For now, use a semaphore for this. It probably needs to be
	 * optimized later.
	 */
	down_read(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	if (node) {
		dmap = node_to_dmap(node);
		if (writable && !dmap->writable) {
			/* Upgrade read-only mapping to read-write. This will
			 * require exclusive fi->dax->sem lock as we don't want
			 * two threads to be trying to do this simultaneously
			 * for same dmap. So drop shared lock and acquire
			 * exclusive lock.
			 *
			 * Before dropping fi->dax->sem lock, take reference
			 * on dmap so that it's not freed by range reclaim.
			 */
			refcount_inc(&dmap->refcnt);
			up_read(&fi->dax->sem);
			pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
				 __func__, pos, length);
			return fuse_upgrade_dax_mapping(inode, pos, length,
							flags, iomap);
		} else {
			fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
			up_read(&fi->dax->sem);
			return 0;
		}
	} else {
		up_read(&fi->dax->sem);
		pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n",
				__func__, pos, length);
		if (pos >= i_size_read(inode))
			goto iomap_hole;

		return fuse_setup_new_dax_mapping(inode, pos, length, flags,
						  iomap);
	}

	/*
	 * If a read beyond end of file happens, fs code seems to return
	 * it as a hole
	 */
iomap_hole:
	fuse_fill_iomap_hole(iomap, length);
	pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n",
		 __func__, pos, length, iomap->length);
	return 0;
}

static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	struct fuse_dax_mapping *dmap = iomap->private;

	if (dmap) {
		if (refcount_dec_and_test(&dmap->refcnt)) {
			/* refcount should not hit 0. This object only goes
			 * away when fuse connection goes away
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* DAX writes beyond end-of-file aren't handled using iomap, so the
	 * file size is unchanged and there is nothing to do here.
	 */
	return 0;
}

static const struct iomap_ops fuse_iomap_ops = {
	.iomap_begin = fuse_iomap_begin,
	.iomap_end = fuse_iomap_end,
};
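
/*
 * Usage note: the generic DAX helpers dax_iomap_rw() and dax_iomap_fault()
 * drive these callbacks. Each call maps a file extent via ->iomap_begin()
 * (which pins the dmap through its refcount), performs the copy or installs
 * the PTE against fc->dax->dev, then releases the pin in ->iomap_end().
 */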

static void fuse_wait_dax_page(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	up_write(&fi->i_mmap_sem);
	schedule();
	down_write(&fi->i_mmap_sem);
}

/* Should be called with fi->i_mmap_sem lock held exclusively */
static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
				    loff_t start, loff_t end)
{
	struct page *page;

	page = dax_layout_busy_page_range(inode->i_mapping, start, end);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, fuse_wait_dax_page(inode));
}

/* dmap_end == 0 leads to unmapping of whole file */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
				  u64 dmap_end)
{
	bool	retry;
	int	ret;

	do {
		retry = false;
		ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
					       dmap_end);
	} while (ret == 0 && retry);

	return ret;
}
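
/*
 * Note on the loop above: dax_layout_busy_page_range() unmaps the range and
 * returns any page still pinned (e.g. by get_user_pages()). A page refcount
 * of 1 means only the DAX entry itself holds it, so the wait completes and
 * the lookup is retried until no busy page remains.
 */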

ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops);
	inode_unlock_shared(inode);

	/* TODO file_accessed(iocb->f_filp) */
	return ret;
}

static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return (iov_iter_rw(from) == WRITE &&
		((iocb->ki_pos) >= i_size_read(inode) ||
		  (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
}

static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
	ssize_t ret;

	ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	if (ret < 0)
		return ret;

	fuse_invalidate_attr(inode);
	fuse_write_update_size(inode, iocb->ki_pos);
	return ret;
}

ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	/* TODO file_update_time() but we don't want metadata I/O */

	/* Do not use dax for file extending writes as write and on
	 * disk i_size increase are not atomic otherwise.
	 */
	if (file_extending_write(iocb, from))
		ret = fuse_dax_direct_write(iocb, from);
	else
		ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

static int fuse_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
}

static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf,
				   enum page_entry_size pe_size, bool write)
{
	vm_fault_t ret;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	pfn_t pfn;
	int error = 0;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	bool retry = false;

	if (write)
		sb_start_pagefault(sb);
retry:
	if (retry && !(fcd->nr_free_ranges > 0))
		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));

	/*
	 * We need to serialize against not only truncate but also against
	 * fuse dax memory range reclaim. While a range is being reclaimed,
	 * we do not want any read/write/mmap to make progress and try
	 * to populate page cache or access memory we are trying to free.
	 */
	down_read(&get_fuse_inode(inode)->i_mmap_sem);
	ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
		error = 0;
		retry = true;
		up_read(&get_fuse_inode(inode)->i_mmap_sem);
		goto retry;
	}

	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	up_read(&get_fuse_inode(inode)->i_mmap_sem);

	if (write)
		sb_end_pagefault(sb);

	return ret;
}

static vm_fault_t fuse_dax_fault(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, PE_SIZE_PTE,
				vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf,
			       enum page_entry_size pe_size)
{
	return __fuse_dax_fault(vmf, pe_size, vmf->flags & FAULT_FLAG_WRITE);
}

static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
}

static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	return __fuse_dax_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct fuse_dax_vm_ops = {
	.fault		= fuse_dax_fault,
	.huge_fault	= fuse_dax_huge_fault,
	.page_mkwrite	= fuse_dax_page_mkwrite,
	.pfn_mkwrite	= fuse_dax_pfn_mkwrite,
};

int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &fuse_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
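
/*
 * Note: VM_MIXEDMAP lets the VMA mix struct-page-less DAX pfn mappings with
 * ordinary pages, and VM_HUGEPAGE hints that 2 MiB faults are welcome, which
 * matches the FUSE_DAX_SZ range granularity and the ->huge_fault handler
 * above.
 */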

static int dmap_writeback_invalidate(struct inode *inode,
				     struct fuse_dax_mapping *dmap)
{
	int ret;
	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);

	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
	if (ret) {
		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
			 ret, start_pos, end_pos);
		return ret;
	}

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    start_pos >> PAGE_SHIFT,
					    end_pos >> PAGE_SHIFT);
	if (ret)
		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
			 ret);

	return ret;
}

static int reclaim_one_dmap_locked(struct inode *inode,
				   struct fuse_dax_mapping *dmap)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * igrab() was done to make sure inode won't go under us, and this
	 * further avoids the race with evict().
	 */
	ret = dmap_writeback_invalidate(inode, dmap);
	if (ret)
		return ret;

	/* Remove dax mapping from inode interval tree now */
	interval_tree_remove(&dmap->itn, &fi->dax->tree);
	fi->dax->nr--;

	/* It is possible that umount/shutdown has killed the fuse connection
	 * and worker thread is trying to reclaim memory in parallel.  Don't
	 * warn in that case.
	 */
	ret = dmap_removemapping_one(inode, dmap);
	if (ret && ret != -ENOTCONN) {
		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
			dmap->window_offset, dmap->length, ret);
	}
	return 0;
}

/* Find the first mapped dmap for an inode that is not in use. Caller needs
 * to hold fi->dax->sem lock either shared or exclusive.
 */
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
	     node = interval_tree_iter_next(node, 0, -1)) {
		dmap = node_to_dmap(node);
		/* still in use. */
		if (refcount_read(&dmap->refcnt) > 1)
			continue;

		return dmap;
	}

	return NULL;
}

/*
 * Find first mapping in the tree and free it and return it. Do not add
 * it back to free pool.
 */
static struct fuse_dax_mapping *
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
			      bool *retry)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	u64 dmap_start, dmap_end;
	unsigned long start_idx;
	int ret;
	struct interval_tree_node *node;

	down_write(&fi->i_mmap_sem);

	/* Lookup a dmap and corresponding file offset to reclaim. */
	down_read(&fi->dax->sem);
	dmap = inode_lookup_first_dmap(inode);
	if (dmap) {
		start_idx = dmap->itn.start;
		dmap_start = start_idx << FUSE_DAX_SHIFT;
		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
	}
	up_read(&fi->dax->sem);

	if (!dmap)
		goto out_mmap_sem;
	/*
	 * Make sure there are no references to inode pages using
	 * get_user_pages()
	 */
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		dmap = ERR_PTR(ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	/* Range already got reclaimed by somebody else */
	if (!node) {
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	dmap = node_to_dmap(node);
	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1) {
		dmap = NULL;
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0) {
		dmap = ERR_PTR(ret);
		goto out_write_dmap_sem;
	}

	/* Clean up dmap. Do not add back to free list */
	dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;

	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
		 __func__, inode, dmap->window_offset, dmap->length);

out_write_dmap_sem:
	up_write(&fi->dax->sem);
out_mmap_sem:
	up_write(&fi->i_mmap_sem);
	return dmap;
}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{
	struct fuse_dax_mapping *dmap;
	struct fuse_inode *fi = get_fuse_inode(inode);

	while (1) {
		bool retry = false;

		dmap = alloc_dax_mapping(fcd);
		if (dmap)
			return dmap;

		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
		/*
		 * Either we got a mapping or it is an error, return in both
		 * the cases.
		 */
		if (dmap)
			return dmap;

		/* If we could not reclaim a mapping because it
		 * had a reference or some other temporary failure,
		 * try again. We want to give up inline reclaim only
		 * if there is no range assigned to this node. Otherwise
		 * a deadlock is possible if we sleep with fi->i_mmap_sem
		 * held and the worker to free memory can't make progress due
		 * to unavailability of the fi->i_mmap_sem lock. So sleep
		 * only if fi->dax->nr=0
		 */
		if (retry)
			continue;
		/*
		 * There are no mappings which can be reclaimed. Wait for one.
		 * We are not holding fi->dax->sem. So it is possible
		 * that range gets added now. But as we are not holding
		 * fi->i_mmap_sem, worker should still be able to free up
		 * a range and wake us up.
		 */
		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
			if (wait_event_killable_exclusive(fcd->range_waitq,
					(fcd->nr_free_ranges > 0))) {
				return ERR_PTR(-EINTR);
			}
		}
	}
}
1074*4882a593Smuzhiyun 
lookup_and_reclaim_dmap_locked(struct fuse_conn_dax * fcd,struct inode * inode,unsigned long start_idx)1075*4882a593Smuzhiyun static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
1076*4882a593Smuzhiyun 					  struct inode *inode,
1077*4882a593Smuzhiyun 					  unsigned long start_idx)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun 	int ret;
1080*4882a593Smuzhiyun 	struct fuse_inode *fi = get_fuse_inode(inode);
1081*4882a593Smuzhiyun 	struct fuse_dax_mapping *dmap;
1082*4882a593Smuzhiyun 	struct interval_tree_node *node;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	/* Find fuse dax mapping at file offset inode. */
1085*4882a593Smuzhiyun 	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	/* Range already got cleaned up by somebody else */
1088*4882a593Smuzhiyun 	if (!node)
1089*4882a593Smuzhiyun 		return 0;
1090*4882a593Smuzhiyun 	dmap = node_to_dmap(node);
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	/* still in use. */
1093*4882a593Smuzhiyun 	if (refcount_read(&dmap->refcnt) > 1)
1094*4882a593Smuzhiyun 		return 0;
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	ret = reclaim_one_dmap_locked(inode, dmap);
1097*4882a593Smuzhiyun 	if (ret < 0)
1098*4882a593Smuzhiyun 		return ret;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	/* Cleanup dmap entry and add back to free list */
1101*4882a593Smuzhiyun 	spin_lock(&fcd->lock);
1102*4882a593Smuzhiyun 	dmap_reinit_add_to_free_pool(fcd, dmap);
1103*4882a593Smuzhiyun 	spin_unlock(&fcd->lock);
1104*4882a593Smuzhiyun 	return ret;
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun 
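/*
 * Editor's note: the refcount convention behind the "still in use" check
 * above: every range starts with refcnt == 1 (see fuse_dax_mem_range_init()
 * below), and the dax iomap path holds an extra reference while actively
 * using a range, so refcount_read(&dmap->refcnt) > 1 means "busy, skip
 * reclaim".
 */
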
1107*4882a593Smuzhiyun /*
1108*4882a593Smuzhiyun  * Free a range of memory.
1109*4882a593Smuzhiyun  * Locking:
1110*4882a593Smuzhiyun  * 1. Take fi->i_mmap_sem to block dax faults.
1111*4882a593Smuzhiyun  * 2. Take fi->dax->sem to protect interval tree and also to make sure
1112*4882a593Smuzhiyun  *    read/write can not reuse a dmap which we might be freeing.
1113*4882a593Smuzhiyun  */
1114*4882a593Smuzhiyun static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
1115*4882a593Smuzhiyun 				   struct inode *inode,
1116*4882a593Smuzhiyun 				   unsigned long start_idx,
1117*4882a593Smuzhiyun 				   unsigned long end_idx)
1118*4882a593Smuzhiyun {
1119*4882a593Smuzhiyun 	int ret;
1120*4882a593Smuzhiyun 	struct fuse_inode *fi = get_fuse_inode(inode);
1121*4882a593Smuzhiyun 	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
1122*4882a593Smuzhiyun 	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	down_write(&fi->i_mmap_sem);
1125*4882a593Smuzhiyun 	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
1126*4882a593Smuzhiyun 	if (ret) {
1127*4882a593Smuzhiyun 		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
1128*4882a593Smuzhiyun 			 ret);
1129*4882a593Smuzhiyun 		goto out_mmap_sem;
1130*4882a593Smuzhiyun 	}
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	down_write(&fi->dax->sem);
1133*4882a593Smuzhiyun 	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
1134*4882a593Smuzhiyun 	up_write(&fi->dax->sem);
1135*4882a593Smuzhiyun out_mmap_sem:
1136*4882a593Smuzhiyun 	up_write(&fi->i_mmap_sem);
1137*4882a593Smuzhiyun 	return ret;
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun 
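/*
 * Editor's note: an illustrative restatement of the lock ordering the
 * function above (and the inline-reclaim path) must follow; acquiring the
 * two locks in the opposite order could deadlock against the dax fault
 * path. A sketch only, mirroring the code above:
 */
#if 0
	down_write(&fi->i_mmap_sem);	/* 1. block new dax faults */
	down_write(&fi->dax->sem);	/* 2. protect the interval tree */
	/* ... reclaim the range ... */
	up_write(&fi->dax->sem);
	up_write(&fi->i_mmap_sem);
#endif
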
1140*4882a593Smuzhiyun static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
1141*4882a593Smuzhiyun 				   unsigned long nr_to_free)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun 	struct fuse_dax_mapping *dmap, *pos, *temp;
1144*4882a593Smuzhiyun 	int ret, nr_freed = 0;
1145*4882a593Smuzhiyun 	unsigned long start_idx = 0, end_idx = 0;
1146*4882a593Smuzhiyun 	struct inode *inode = NULL;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	/* Pick the first busy range and free it for now */
1149*4882a593Smuzhiyun 	while (1) {
1150*4882a593Smuzhiyun 		if (nr_freed >= nr_to_free)
1151*4882a593Smuzhiyun 			break;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 		dmap = NULL;
1154*4882a593Smuzhiyun 		spin_lock(&fcd->lock);
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 		if (!fcd->nr_busy_ranges) {
1157*4882a593Smuzhiyun 			spin_unlock(&fcd->lock);
1158*4882a593Smuzhiyun 			return 0;
1159*4882a593Smuzhiyun 		}
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
1162*4882a593Smuzhiyun 						busy_list) {
1163*4882a593Smuzhiyun 			/* skip this range if it's in use. */
1164*4882a593Smuzhiyun 			if (refcount_read(&pos->refcnt) > 1)
1165*4882a593Smuzhiyun 				continue;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 			inode = igrab(pos->inode);
1168*4882a593Smuzhiyun 			/*
1169*4882a593Smuzhiyun 			 * This inode is going away. That will free
1170*4882a593Smuzhiyun 			 * up all the ranges anyway; continue to the
1171*4882a593Smuzhiyun 			 * next range.
1172*4882a593Smuzhiyun 			 */
1173*4882a593Smuzhiyun 			if (!inode)
1174*4882a593Smuzhiyun 				continue;
1175*4882a593Smuzhiyun 			/*
1176*4882a593Smuzhiyun 			 * Take this element off the list and add it to the
1177*4882a593Smuzhiyun 			 * tail. If this element can't be freed, that helps
1178*4882a593Smuzhiyun 			 * select a new element in the next loop iteration.
1179*4882a593Smuzhiyun 			 */
1180*4882a593Smuzhiyun 			dmap = pos;
1181*4882a593Smuzhiyun 			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1182*4882a593Smuzhiyun 			start_idx = end_idx = dmap->itn.start;
1183*4882a593Smuzhiyun 			break;
1184*4882a593Smuzhiyun 		}
1185*4882a593Smuzhiyun 		spin_unlock(&fcd->lock);
1186*4882a593Smuzhiyun 		if (!dmap)
1187*4882a593Smuzhiyun 			return 0;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
1190*4882a593Smuzhiyun 		iput(inode);
1191*4882a593Smuzhiyun 		if (ret)
1192*4882a593Smuzhiyun 			return ret;
1193*4882a593Smuzhiyun 		nr_freed++;
1194*4882a593Smuzhiyun 	}
1195*4882a593Smuzhiyun 	return 0;
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
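/*
 * Editor's note: the igrab()/iput() pair above is the standard pattern for
 * pinning an inode across a section that may sleep: igrab() returns NULL
 * once the inode has begun eviction, which is why the scan simply skips
 * such ranges (eviction frees their mappings anyway).
 */
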
1198*4882a593Smuzhiyun static void fuse_dax_free_mem_worker(struct work_struct *work)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	int ret;
1201*4882a593Smuzhiyun 	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
1202*4882a593Smuzhiyun 						 free_work.work);
1203*4882a593Smuzhiyun 	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
1204*4882a593Smuzhiyun 	if (ret) {
1205*4882a593Smuzhiyun 		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
1206*4882a593Smuzhiyun 			 ret);
1207*4882a593Smuzhiyun 	}
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	/* If the number of free ranges is still below the threshold, requeue */
1210*4882a593Smuzhiyun 	kick_dmap_free_worker(fcd, 1);
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun 
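/*
 * Editor's note: a hedged, simplified sketch of the requeue decision that
 * kick_dmap_free_worker() (defined earlier in this file) makes, plus a
 * worked example. With a 1 GiB DAX window there are 512 ranges of 2 MiB,
 * so with FUSE_DAX_RECLAIM_THRESHOLD = 20 the worker keeps requeuing while
 * nr_free_ranges < 512 * 20 / 100 = 102. The workqueue choice below is an
 * assumption for illustration, and the real helper also clamps the
 * threshold to at least one range.
 */
#if 0
	unsigned long threshold =
		fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100;

	if (fcd->nr_free_ranges < threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
#endif
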
1213*4882a593Smuzhiyun static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun 	struct fuse_dax_mapping *range, *temp;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/* Free all allocated elements */
1218*4882a593Smuzhiyun 	list_for_each_entry_safe(range, temp, mem_list, list) {
1219*4882a593Smuzhiyun 		list_del(&range->list);
1220*4882a593Smuzhiyun 		if (!list_empty(&range->busy_list))
1221*4882a593Smuzhiyun 			list_del(&range->busy_list);
1222*4882a593Smuzhiyun 		kfree(range);
1223*4882a593Smuzhiyun 	}
1224*4882a593Smuzhiyun }
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun void fuse_dax_conn_free(struct fuse_conn *fc)
1227*4882a593Smuzhiyun {
1228*4882a593Smuzhiyun 	if (fc->dax) {
1229*4882a593Smuzhiyun 		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
1230*4882a593Smuzhiyun 		kfree(fc->dax);
1231*4882a593Smuzhiyun 	}
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
1235*4882a593Smuzhiyun {
1236*4882a593Smuzhiyun 	long nr_pages, nr_ranges;
1237*4882a593Smuzhiyun 	void *kaddr;
1238*4882a593Smuzhiyun 	pfn_t pfn;
1239*4882a593Smuzhiyun 	struct fuse_dax_mapping *range;
1240*4882a593Smuzhiyun 	int ret, id;
1241*4882a593Smuzhiyun 	size_t dax_size = -1;
1242*4882a593Smuzhiyun 	unsigned long i;
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	init_waitqueue_head(&fcd->range_waitq);
1245*4882a593Smuzhiyun 	INIT_LIST_HEAD(&fcd->free_ranges);
1246*4882a593Smuzhiyun 	INIT_LIST_HEAD(&fcd->busy_ranges);
1247*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	id = dax_read_lock();
1250*4882a593Smuzhiyun 	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
1251*4882a593Smuzhiyun 				     &pfn);
1252*4882a593Smuzhiyun 	dax_read_unlock(id);
1253*4882a593Smuzhiyun 	if (nr_pages < 0) {
1254*4882a593Smuzhiyun 		pr_debug("dax_direct_access() returned %ld\n", nr_pages);
1255*4882a593Smuzhiyun 		return nr_pages;
1256*4882a593Smuzhiyun 	}
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	nr_ranges = nr_pages / FUSE_DAX_PAGES;
1259*4882a593Smuzhiyun 	pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
1260*4882a593Smuzhiyun 		__func__, nr_pages, nr_ranges);
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	for (i = 0; i < nr_ranges; i++) {
1263*4882a593Smuzhiyun 		range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);
1264*4882a593Smuzhiyun 		ret = -ENOMEM;
1265*4882a593Smuzhiyun 		if (!range)
1266*4882a593Smuzhiyun 			goto out_err;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 		/* TODO: This offset only works if the virtio-fs driver
1269*4882a593Smuzhiyun 		 * does not hide some memory at the beginning of the
1270*4882a593Smuzhiyun 		 * window. This needs better handling.
1271*4882a593Smuzhiyun 		 */
1272*4882a593Smuzhiyun 		range->window_offset = i * FUSE_DAX_SZ;
1273*4882a593Smuzhiyun 		range->length = FUSE_DAX_SZ;
1274*4882a593Smuzhiyun 		INIT_LIST_HEAD(&range->busy_list);
1275*4882a593Smuzhiyun 		refcount_set(&range->refcnt, 1);
1276*4882a593Smuzhiyun 		list_add_tail(&range->list, &fcd->free_ranges);
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	fcd->nr_free_ranges = nr_ranges;
1280*4882a593Smuzhiyun 	fcd->nr_ranges = nr_ranges;
1281*4882a593Smuzhiyun 	return 0;
1282*4882a593Smuzhiyun out_err:
1283*4882a593Smuzhiyun 	/* Free all allocated elements */
1284*4882a593Smuzhiyun 	fuse_free_dax_mem_ranges(&fcd->free_ranges);
1285*4882a593Smuzhiyun 	return ret;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun 
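/*
 * Editor's note: worked sizing example for the init loop above. With
 * 4 KiB pages, FUSE_DAX_PAGES = 2 MiB / 4 KiB = 512. A 1 GiB DAX window
 * makes dax_direct_access() report nr_pages = 262144, giving
 * nr_ranges = 262144 / 512 = 512 ranges at window offsets
 * 0, 2 MiB, 4 MiB, ... (i * FUSE_DAX_SZ).
 */
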
1288*4882a593Smuzhiyun int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun 	struct fuse_conn_dax *fcd;
1291*4882a593Smuzhiyun 	int err;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	if (!dax_dev)
1294*4882a593Smuzhiyun 		return 0;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
1297*4882a593Smuzhiyun 	if (!fcd)
1298*4882a593Smuzhiyun 		return -ENOMEM;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	spin_lock_init(&fcd->lock);
1301*4882a593Smuzhiyun 	fcd->dev = dax_dev;
1302*4882a593Smuzhiyun 	err = fuse_dax_mem_range_init(fcd);
1303*4882a593Smuzhiyun 	if (err) {
1304*4882a593Smuzhiyun 		kfree(fcd);
1305*4882a593Smuzhiyun 		return err;
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	fc->dax = fcd;
1309*4882a593Smuzhiyun 	return 0;
1310*4882a593Smuzhiyun }
1311*4882a593Smuzhiyun 
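/*
 * Editor's note: a hedged sketch of the expected lifecycle around the
 * allocation above; the real callers live in the mount/umount paths
 * outside this file, and the error label below is illustrative.
 */
#if 0
	err = fuse_dax_conn_alloc(fc, dax_dev);	/* no-op when !dax_dev */
	if (err)
		goto err_out;

	/* ... filesystem runs; on teardown: */
	fuse_dax_cancel_work(fc);	/* stop the free worker first */
	fuse_dax_conn_free(fc);		/* then release ranges and fcd */
#endif
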
1312*4882a593Smuzhiyun bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun 	struct fuse_conn *fc = get_fuse_conn_super(sb);
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	fi->dax = NULL;
1317*4882a593Smuzhiyun 	if (fc->dax) {
1318*4882a593Smuzhiyun 		fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
1319*4882a593Smuzhiyun 		if (!fi->dax)
1320*4882a593Smuzhiyun 			return false;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		init_rwsem(&fi->dax->sem);
1323*4882a593Smuzhiyun 		fi->dax->tree = RB_ROOT_CACHED;
1324*4882a593Smuzhiyun 	}
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	return true;
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun static const struct address_space_operations fuse_dax_file_aops = {
1330*4882a593Smuzhiyun 	.writepages	= fuse_dax_writepages,
1331*4882a593Smuzhiyun 	.direct_IO	= noop_direct_IO,
1332*4882a593Smuzhiyun 	.set_page_dirty	= noop_set_page_dirty,
1333*4882a593Smuzhiyun 	.invalidatepage	= noop_invalidatepage,
1334*4882a593Smuzhiyun };
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun void fuse_dax_inode_init(struct inode *inode)
1337*4882a593Smuzhiyun {
1338*4882a593Smuzhiyun 	struct fuse_conn *fc = get_fuse_conn(inode);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	if (!fc->dax)
1341*4882a593Smuzhiyun 		return;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	inode->i_flags |= S_DAX;
1344*4882a593Smuzhiyun 	inode->i_data.a_ops = &fuse_dax_file_aops;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun 	if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
1350*4882a593Smuzhiyun 		pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
1351*4882a593Smuzhiyun 			map_alignment, FUSE_DAX_SZ);
1352*4882a593Smuzhiyun 		return false;
1353*4882a593Smuzhiyun 	}
1354*4882a593Smuzhiyun 	return true;
1355*4882a593Smuzhiyun }
1356*4882a593Smuzhiyun 
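/*
 * Editor's note: map_alignment from FUSE_INIT is a log2 (shift) value, so
 * the check above compares shifts while the warning prints a byte size.
 * Worked example: a server requiring 64 KiB alignment sends
 * map_alignment = 16, and 16 <= FUSE_DAX_SHIFT passes; a server requiring
 * 4 MiB alignment (22) would be rejected, because 2 MiB ranges cannot
 * honor it.
 */
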
1357*4882a593Smuzhiyun void fuse_dax_cancel_work(struct fuse_conn *fc)
1358*4882a593Smuzhiyun {
1359*4882a593Smuzhiyun 	struct fuse_conn_dax *fcd = fc->dax;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	if (fcd)
1362*4882a593Smuzhiyun 		cancel_delayed_work_sync(&fcd->free_work);
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);
1366