xref: /OK3568_Linux_fs/kernel/fs/fscache/page.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /* Cache page management and data I/O routines
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
5*4882a593Smuzhiyun  * Written by David Howells (dhowells@redhat.com)
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #define FSCACHE_DEBUG_LEVEL PAGE
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/fscache-cache.h>
11*4882a593Smuzhiyun #include <linux/buffer_head.h>
12*4882a593Smuzhiyun #include <linux/pagevec.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include "internal.h"
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun /*
17*4882a593Smuzhiyun  * check to see if a page is being written to the cache
18*4882a593Smuzhiyun  */
__fscache_check_page_write(struct fscache_cookie * cookie,struct page * page)19*4882a593Smuzhiyun bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun 	void *val;
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 	rcu_read_lock();
24*4882a593Smuzhiyun 	val = radix_tree_lookup(&cookie->stores, page->index);
25*4882a593Smuzhiyun 	rcu_read_unlock();
26*4882a593Smuzhiyun 	trace_fscache_check_page(cookie, page, val, 0);
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 	return val != NULL;
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun EXPORT_SYMBOL(__fscache_check_page_write);
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun  * wait for a page to finish being written to the cache
34*4882a593Smuzhiyun  */
__fscache_wait_on_page_write(struct fscache_cookie * cookie,struct page * page)35*4882a593Smuzhiyun void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	trace_fscache_page(cookie, page, fscache_page_write_wait);
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	wait_event(*wq, !__fscache_check_page_write(cookie, page));
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun EXPORT_SYMBOL(__fscache_wait_on_page_write);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /*
46*4882a593Smuzhiyun  * wait for a page to finish being written to the cache. Put a timeout here
47*4882a593Smuzhiyun  * since we might be called recursively via parent fs.
48*4882a593Smuzhiyun  */
49*4882a593Smuzhiyun static
release_page_wait_timeout(struct fscache_cookie * cookie,struct page * page)50*4882a593Smuzhiyun bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
55*4882a593Smuzhiyun 				  HZ);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun 
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 * - returns true if the page may be released, false if vmscan should back off
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	trace_fscache_page(cookie, page, fscache_page_maybe_release);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		/* no store is outstanding against this page at all */
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	/* the stores_lock is taken before the RCU read lock is dropped, then
	 * the STORING tag is rechecked under the lock before deleting */
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		/* someone else removed the entry whilst we were unlocked */
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	/* rouse anyone sleeping in __fscache_wait_on_page_write() */
	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);
	if (xpage)
		put_page(xpage);	/* drop the ref taken for the store */
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
			page, page->index);

	/* clear the reclaim flag so the retry takes the non-sleeping path if
	 * the page is still busy - i.e. we only wait once */
	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
142*4882a593Smuzhiyun 
/*
 * note that a page has finished being written to the cache
 * - clears the STORING tag and, if no further store is pending against the
 *   page, deletes its entry from the cookie's store tree and drops the page
 *   reference that entry was holding
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL, *val;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_write_end);

			val = radix_tree_lookup(&cookie->stores, page->index);
			trace_fscache_check_page(cookie, page, val, 1);
		} else {
			/* another store was queued against the page whilst
			 * this one was in flight */
			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
		}
		spin_unlock(&cookie->stores_lock);
		/* rouse waiters in __fscache_wait_on_page_write() */
		wake_up_bit(&cookie->flags, 0);
		trace_fscache_wake_cookie(cookie);
	} else {
		/* NOTE(review): cookie is NULL on this path; presumably the
		 * tracepoint tolerates a NULL cookie - confirm */
		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
	}
	spin_unlock(&object->lock);
	/* the page ref is dropped after both locks are released */
	if (xpage)
		put_page(xpage);
}
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /*
185*4882a593Smuzhiyun  * actually apply the changed attributes to a cache object
186*4882a593Smuzhiyun  */
fscache_attr_changed_op(struct fscache_operation * op)187*4882a593Smuzhiyun static void fscache_attr_changed_op(struct fscache_operation *op)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun 	struct fscache_object *object = op->object;
190*4882a593Smuzhiyun 	int ret;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	fscache_stat(&fscache_n_attr_changed_calls);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	if (fscache_object_is_active(object)) {
197*4882a593Smuzhiyun 		fscache_stat(&fscache_n_cop_attr_changed);
198*4882a593Smuzhiyun 		ret = object->cache->ops->attr_changed(object);
199*4882a593Smuzhiyun 		fscache_stat_d(&fscache_n_cop_attr_changed);
200*4882a593Smuzhiyun 		if (ret < 0)
201*4882a593Smuzhiyun 			fscache_abort_object(object);
202*4882a593Smuzhiyun 		fscache_op_complete(op, ret < 0);
203*4882a593Smuzhiyun 	} else {
204*4882a593Smuzhiyun 		fscache_op_complete(op, true);
205*4882a593Smuzhiyun 	}
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	_leave("");
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun 
/*
 * notification that the attributes on an object have changed
 * - queues an exclusive async operation to push the change to the backend
 * - returns 0 on success, -ENOMEM if the op can't be allocated, -ENOBUFS if
 *   there's no enabled backing object to apply it to
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	/* attribute changes only apply to data-class cookies, not indices */
	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* pin the cookie for the duration; the pin is handed back on
	 * submission failure via nobufs_dec */
	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	/* any unused-cookie wakeup is issued outside the cookie lock */
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun /*
269*4882a593Smuzhiyun  * Handle cancellation of a pending retrieval op
270*4882a593Smuzhiyun  */
fscache_do_cancel_retrieval(struct fscache_operation * _op)271*4882a593Smuzhiyun static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun 	struct fscache_retrieval *op =
274*4882a593Smuzhiyun 		container_of(_op, struct fscache_retrieval, op);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	atomic_set(&op->n_pages, 0);
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun /*
280*4882a593Smuzhiyun  * release a retrieval op reference
281*4882a593Smuzhiyun  */
fscache_release_retrieval_op(struct fscache_operation * _op)282*4882a593Smuzhiyun static void fscache_release_retrieval_op(struct fscache_operation *_op)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun 	struct fscache_retrieval *op =
285*4882a593Smuzhiyun 		container_of(_op, struct fscache_retrieval, op);
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	_enter("{OP%x}", op->op.debug_id);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
290*4882a593Smuzhiyun 		    atomic_read(&op->n_pages), ==, 0);
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	fscache_hist(fscache_retrieval_histogram, op->start_time);
293*4882a593Smuzhiyun 	if (op->context)
294*4882a593Smuzhiyun 		fscache_put_context(op->cookie, op->context);
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	_leave("");
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun /*
300*4882a593Smuzhiyun  * allocate a retrieval op
301*4882a593Smuzhiyun  */
fscache_alloc_retrieval(struct fscache_cookie * cookie,struct address_space * mapping,fscache_rw_complete_t end_io_func,void * context)302*4882a593Smuzhiyun static struct fscache_retrieval *fscache_alloc_retrieval(
303*4882a593Smuzhiyun 	struct fscache_cookie *cookie,
304*4882a593Smuzhiyun 	struct address_space *mapping,
305*4882a593Smuzhiyun 	fscache_rw_complete_t end_io_func,
306*4882a593Smuzhiyun 	void *context)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun 	struct fscache_retrieval *op;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	/* allocate a retrieval operation and attempt to submit it */
311*4882a593Smuzhiyun 	op = kzalloc(sizeof(*op), GFP_NOIO);
312*4882a593Smuzhiyun 	if (!op) {
313*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_nomem);
314*4882a593Smuzhiyun 		return NULL;
315*4882a593Smuzhiyun 	}
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	fscache_operation_init(cookie, &op->op, NULL,
318*4882a593Smuzhiyun 			       fscache_do_cancel_retrieval,
319*4882a593Smuzhiyun 			       fscache_release_retrieval_op);
320*4882a593Smuzhiyun 	op->op.flags	= FSCACHE_OP_MYTHREAD |
321*4882a593Smuzhiyun 		(1UL << FSCACHE_OP_WAITING) |
322*4882a593Smuzhiyun 		(1UL << FSCACHE_OP_UNUSE_COOKIE);
323*4882a593Smuzhiyun 	op->cookie	= cookie;
324*4882a593Smuzhiyun 	op->mapping	= mapping;
325*4882a593Smuzhiyun 	op->end_io_func	= end_io_func;
326*4882a593Smuzhiyun 	op->context	= context;
327*4882a593Smuzhiyun 	op->start_time	= jiffies;
328*4882a593Smuzhiyun 	INIT_LIST_HEAD(&op->to_do);
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	/* Pin the netfs read context in case we need to do the actual netfs
331*4882a593Smuzhiyun 	 * read because we've encountered a cache read failure.
332*4882a593Smuzhiyun 	 */
333*4882a593Smuzhiyun 	if (context)
334*4882a593Smuzhiyun 		fscache_get_context(op->cookie, context);
335*4882a593Smuzhiyun 	return op;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun /*
339*4882a593Smuzhiyun  * wait for a deferred lookup to complete
340*4882a593Smuzhiyun  */
fscache_wait_for_deferred_lookup(struct fscache_cookie * cookie)341*4882a593Smuzhiyun int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun 	unsigned long jif;
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	_enter("");
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
348*4882a593Smuzhiyun 		_leave(" = 0 [imm]");
349*4882a593Smuzhiyun 		return 0;
350*4882a593Smuzhiyun 	}
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	fscache_stat(&fscache_n_retrievals_wait);
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	jif = jiffies;
355*4882a593Smuzhiyun 	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
356*4882a593Smuzhiyun 			TASK_INTERRUPTIBLE) != 0) {
357*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_intr);
358*4882a593Smuzhiyun 		_leave(" = -ERESTARTSYS");
359*4882a593Smuzhiyun 		return -ERESTARTSYS;
360*4882a593Smuzhiyun 	}
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	smp_rmb();
365*4882a593Smuzhiyun 	fscache_hist(fscache_retrieval_delay_histogram, jif);
366*4882a593Smuzhiyun 	_leave(" = 0 [dly]");
367*4882a593Smuzhiyun 	return 0;
368*4882a593Smuzhiyun }
369*4882a593Smuzhiyun 
/*
 * wait for an object to become active (or dead)
 * - returns 0 if the op got to run, -ERESTARTSYS if the wait was interrupted
 *   by a signal and the op could be cancelled, or -ENOBUFS if the op ended
 *   up cancelled or the object/cache is dying or broken
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		/* interrupted by a signal: try to cancel the op; a zero
		 * return means the cancellation took effect */
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		/* snapshot the op state before cancellation for the debug
		 * message below */
		enum fscache_operation_state state = op->state;
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}
419*4882a593Smuzhiyun 
/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	/* any in-flight deferred lookup must finish before submitting */
	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);

	spin_lock(&cookie->lock);

	/* recheck under the lock: the backing object may have gone away */
	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	/* pin the cookie and count the read against the object; both are
	 * undone on submission failure via nobufs_unlock_dec */
	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		/* nothing stored yet: just reserve a block; a zero return is
		 * converted to -ENODATA so the caller reads from the netfs */
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	/* translate the result into the appropriate statistic */
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun /*
541*4882a593Smuzhiyun  * read a list of page from the cache or allocate a block in which to store
542*4882a593Smuzhiyun  * them
543*4882a593Smuzhiyun  * - we return:
544*4882a593Smuzhiyun  *   -ENOMEM	- out of memory, some pages may be being read
545*4882a593Smuzhiyun  *   -ERESTARTSYS - interrupted, some pages may be being read
546*4882a593Smuzhiyun  *   -ENOBUFS	- no backing object or space available in which to cache any
547*4882a593Smuzhiyun  *                pages not being read
548*4882a593Smuzhiyun  *   -ENODATA	- no data available in the backing object for some or all of
549*4882a593Smuzhiyun  *                the pages
550*4882a593Smuzhiyun  *   0		- dispatched a read on all pages
551*4882a593Smuzhiyun  *
552*4882a593Smuzhiyun  * end_io_func() will be called for each page read from the cache as it is
553*4882a593Smuzhiyun  * finishes being read
554*4882a593Smuzhiyun  *
555*4882a593Smuzhiyun  * any pages for which a read is dispatched will be removed from pages and
556*4882a593Smuzhiyun  * nr_pages
557*4882a593Smuzhiyun  */
__fscache_read_or_alloc_pages(struct fscache_cookie * cookie,struct address_space * mapping,struct list_head * pages,unsigned * nr_pages,fscache_rw_complete_t end_io_func,void * context,gfp_t gfp)558*4882a593Smuzhiyun int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
559*4882a593Smuzhiyun 				  struct address_space *mapping,
560*4882a593Smuzhiyun 				  struct list_head *pages,
561*4882a593Smuzhiyun 				  unsigned *nr_pages,
562*4882a593Smuzhiyun 				  fscache_rw_complete_t end_io_func,
563*4882a593Smuzhiyun 				  void *context,
564*4882a593Smuzhiyun 				  gfp_t gfp)
565*4882a593Smuzhiyun {
566*4882a593Smuzhiyun 	struct fscache_retrieval *op;
567*4882a593Smuzhiyun 	struct fscache_object *object;
568*4882a593Smuzhiyun 	bool wake_cookie = false;
569*4882a593Smuzhiyun 	int ret;
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	_enter("%p,,%d,,,", cookie, *nr_pages);
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	fscache_stat(&fscache_n_retrievals);
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	if (hlist_empty(&cookie->backing_objects))
576*4882a593Smuzhiyun 		goto nobufs;
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
579*4882a593Smuzhiyun 		_leave(" = -ENOBUFS [invalidating]");
580*4882a593Smuzhiyun 		return -ENOBUFS;
581*4882a593Smuzhiyun 	}
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
584*4882a593Smuzhiyun 	ASSERTCMP(*nr_pages, >, 0);
585*4882a593Smuzhiyun 	ASSERT(!list_empty(pages));
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	if (fscache_wait_for_deferred_lookup(cookie) < 0)
588*4882a593Smuzhiyun 		return -ERESTARTSYS;
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun 	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
591*4882a593Smuzhiyun 	if (!op)
592*4882a593Smuzhiyun 		return -ENOMEM;
593*4882a593Smuzhiyun 	atomic_set(&op->n_pages, *nr_pages);
594*4882a593Smuzhiyun 	trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 	spin_lock(&cookie->lock);
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 	if (!fscache_cookie_enabled(cookie) ||
599*4882a593Smuzhiyun 	    hlist_empty(&cookie->backing_objects))
600*4882a593Smuzhiyun 		goto nobufs_unlock;
601*4882a593Smuzhiyun 	object = hlist_entry(cookie->backing_objects.first,
602*4882a593Smuzhiyun 			     struct fscache_object, cookie_link);
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun 	__fscache_use_cookie(cookie);
605*4882a593Smuzhiyun 	atomic_inc(&object->n_reads);
606*4882a593Smuzhiyun 	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 	if (fscache_submit_op(object, &op->op) < 0)
609*4882a593Smuzhiyun 		goto nobufs_unlock_dec;
610*4882a593Smuzhiyun 	spin_unlock(&cookie->lock);
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 	fscache_stat(&fscache_n_retrieval_ops);
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 	/* we wait for the operation to become active, and then process it
615*4882a593Smuzhiyun 	 * *here*, in this thread, and not in the thread pool */
616*4882a593Smuzhiyun 	ret = fscache_wait_for_operation_activation(
617*4882a593Smuzhiyun 		object, &op->op,
618*4882a593Smuzhiyun 		__fscache_stat(&fscache_n_retrieval_op_waits),
619*4882a593Smuzhiyun 		__fscache_stat(&fscache_n_retrievals_object_dead));
620*4882a593Smuzhiyun 	if (ret < 0)
621*4882a593Smuzhiyun 		goto error;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 	/* ask the cache to honour the operation */
624*4882a593Smuzhiyun 	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
625*4882a593Smuzhiyun 		fscache_stat(&fscache_n_cop_allocate_pages);
626*4882a593Smuzhiyun 		ret = object->cache->ops->allocate_pages(
627*4882a593Smuzhiyun 			op, pages, nr_pages, gfp);
628*4882a593Smuzhiyun 		fscache_stat_d(&fscache_n_cop_allocate_pages);
629*4882a593Smuzhiyun 	} else {
630*4882a593Smuzhiyun 		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
631*4882a593Smuzhiyun 		ret = object->cache->ops->read_or_alloc_pages(
632*4882a593Smuzhiyun 			op, pages, nr_pages, gfp);
633*4882a593Smuzhiyun 		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
634*4882a593Smuzhiyun 	}
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun error:
637*4882a593Smuzhiyun 	if (ret == -ENOMEM)
638*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_nomem);
639*4882a593Smuzhiyun 	else if (ret == -ERESTARTSYS)
640*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_intr);
641*4882a593Smuzhiyun 	else if (ret == -ENODATA)
642*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_nodata);
643*4882a593Smuzhiyun 	else if (ret < 0)
644*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_nobufs);
645*4882a593Smuzhiyun 	else
646*4882a593Smuzhiyun 		fscache_stat(&fscache_n_retrievals_ok);
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	fscache_put_retrieval(op);
649*4882a593Smuzhiyun 	_leave(" = %d", ret);
650*4882a593Smuzhiyun 	return ret;
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun nobufs_unlock_dec:
653*4882a593Smuzhiyun 	atomic_dec(&object->n_reads);
654*4882a593Smuzhiyun 	wake_cookie = __fscache_unuse_cookie(cookie);
655*4882a593Smuzhiyun nobufs_unlock:
656*4882a593Smuzhiyun 	spin_unlock(&cookie->lock);
657*4882a593Smuzhiyun 	fscache_put_retrieval(op);
658*4882a593Smuzhiyun 	if (wake_cookie)
659*4882a593Smuzhiyun 		__fscache_wake_unused_cookie(cookie);
660*4882a593Smuzhiyun nobufs:
661*4882a593Smuzhiyun 	fscache_stat(&fscache_n_retrievals_nobufs);
662*4882a593Smuzhiyun 	_leave(" = -ENOBUFS");
663*4882a593Smuzhiyun 	return -ENOBUFS;
664*4882a593Smuzhiyun }
665*4882a593Smuzhiyun EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun /*
668*4882a593Smuzhiyun  * allocate a block in the cache on which to store a page
669*4882a593Smuzhiyun  * - we return:
670*4882a593Smuzhiyun  *   -ENOMEM	- out of memory, nothing done
671*4882a593Smuzhiyun  *   -ERESTARTSYS - interrupted
672*4882a593Smuzhiyun  *   -ENOBUFS	- no backing object available in which to cache the block
673*4882a593Smuzhiyun  *   0		- block allocated
674*4882a593Smuzhiyun  */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	/* quick unlocked check; re-checked under cookie->lock below */
	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	/* no end_io_func/context: this op only allocates, it reads nothing */
	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	/* wait for the op to become active before asking the backend */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	/* drop the cookie use taken just before fscache_submit_op() */
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
760*4882a593Smuzhiyun 
/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
__fscache_readpages_cancel(struct fscache_cookie * cookie,struct list_head * pages)765*4882a593Smuzhiyun void __fscache_readpages_cancel(struct fscache_cookie *cookie,
766*4882a593Smuzhiyun 				struct list_head *pages)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun 	struct page *page;
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	list_for_each_entry(page, pages, lru) {
771*4882a593Smuzhiyun 		if (PageFsCache(page))
772*4882a593Smuzhiyun 			__fscache_uncache_page(cookie, page);
773*4882a593Smuzhiyun 	}
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun EXPORT_SYMBOL(__fscache_readpages_cancel);
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun /*
778*4882a593Smuzhiyun  * release a write op reference
779*4882a593Smuzhiyun  */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	/* Nothing to tear down for a storage op beyond the common
	 * operation state; just leave a trace of the release. */
	_enter("{OP%x}", _op->debug_id);
}
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun /*
786*4882a593Smuzhiyun  * perform the background storage of a page into the cache
787*4882a593Smuzhiyun  */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	/* Each invocation writes at most one pending page; on success the
	 * op re-enqueues itself (see fscache_enqueue_operation() below) to
	 * deal with the next one. */
	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

again:
	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we might write
		 * to the cache no longer exist - therefore, we can just
		 * cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	/* lock order: object->lock, then cookie->stores_lock */
	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	results[0] = NULL;
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);

	/* switch the page's tag from PENDING to STORING before the write
	 * is issued */
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	/* pages at or beyond the store limit are dropped, not written */
	if (page->index >= op->store_limit)
		goto discard_page;

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	trace_fscache_wrote_page(cookie, page, &op->op, ret);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		/* backend write error: abort the object and cancel */
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		/* requeue ourselves to handle the next pending page */
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

discard_page:
	fscache_stat(&fscache_n_store_pages_over_limit);
	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
	fscache_end_page_write(object, page);
	goto again;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, false);
	_leave("");
}
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun /*
892*4882a593Smuzhiyun  * Clear the pages pending writing for invalidation
893*4882a593Smuzhiyun  */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	/* strip PENDING pages from the stores tree in batches of up to 16,
	 * retaking the lock between batches */
	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_inval);
		}

		spin_unlock(&cookie->stores_lock);

		/* drop the page refs the stores tree was holding, outside
		 * the lock */
		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	/* wake anyone in __fscache_wait_on_page_write() (waits on bit 0 of
	 * cookie->flags) */
	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);

	_leave("");
}
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun /*
932*4882a593Smuzhiyun  * request a page be stored in the cache
933*4882a593Smuzhiyun  * - returns:
934*4882a593Smuzhiyun  *   -ENOMEM	- out of memory, nothing done
935*4882a593Smuzhiyun  *   -ENOBUFS	- no backing object available in which to cache the page
936*4882a593Smuzhiyun  *   0		- dispatched a write - it'll call end_io_func() when finished
937*4882a593Smuzhiyun  *
938*4882a593Smuzhiyun  * if the cookie still has a backing object at this point, that object can be
939*4882a593Smuzhiyun  * in one of a few states with respect to storage processing:
940*4882a593Smuzhiyun  *
941*4882a593Smuzhiyun  *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
942*4882a593Smuzhiyun  *      set)
943*4882a593Smuzhiyun  *
944*4882a593Smuzhiyun  *	(a) no writes yet
945*4882a593Smuzhiyun  *
946*4882a593Smuzhiyun  *	(b) writes deferred till post-creation (mark page for writing and
947*4882a593Smuzhiyun  *	    return immediately)
948*4882a593Smuzhiyun  *
949*4882a593Smuzhiyun  *  (2) negative lookup, object created, initial fill being made from netfs
950*4882a593Smuzhiyun  *
951*4882a593Smuzhiyun  *	(a) fill point not yet reached this page (mark page for writing and
952*4882a593Smuzhiyun  *          return)
953*4882a593Smuzhiyun  *
954*4882a593Smuzhiyun  *	(b) fill point passed this page (queue op to store this page)
955*4882a593Smuzhiyun  *
956*4882a593Smuzhiyun  *  (3) object extant (queue op to store this page)
957*4882a593Smuzhiyun  *
958*4882a593Smuzhiyun  * any other state is invalid
959*4882a593Smuzhiyun  */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 loff_t object_size,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	/* allocate outside the locks; GFP_NOIO so the allocation cannot
	 * itself recurse into I/O */
	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	/* preload the radix tree so the insertion below can't fail for
	 * want of memory whilst we hold the spinlocks */
	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	trace_fscache_page(cookie, page, fscache_page_write);

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);

	if (object->store_limit_l != object_size)
		fscache_set_store_limit(object, object_size);

	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		/* -EEXIST: this page is already queued for storage */
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	trace_fscache_page(cookie, page, fscache_page_radix_insert);
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
	/* take a page ref to pin the page whilst it sits in the stores
	 * tree (dropped on write completion or invalidation) */
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	/* an existing writer op will pick this page up, so this op need
	 * not be submitted; still counts as a successful store request */
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	/* undo the tree insertion and the page/cookie refs taken above */
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun /*
1106*4882a593Smuzhiyun  * remove a page from the cache
1107*4882a593Smuzhiyun  */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	trace_fscache_page(cookie, page, fscache_page_uncache);

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		/* no backing object left - just clear the mark ourselves */
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		/* note: must NOT fall through to done_unlock - the lock is
		 * already gone */
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun /**
1158*4882a593Smuzhiyun  * fscache_mark_page_cached - Mark a page as being cached
1159*4882a593Smuzhiyun  * @op: The retrieval op pages are being marked for
1160*4882a593Smuzhiyun  * @page: The page to be marked
1161*4882a593Smuzhiyun  *
1162*4882a593Smuzhiyun  * Mark a netfs page as being cached.  After this is called, the netfs
1163*4882a593Smuzhiyun  * must call fscache_uncache_page() to remove the mark.
1164*4882a593Smuzhiyun  */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	trace_fscache_page(cookie, page, fscache_page_cached);

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		/* page was already marked: warn once (the check-then-set on
		 * once_only is not atomic, so the warning could in theory
		 * fire more than once - presumably acceptable for a one-off
		 * diagnostic) */
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	/* give the netfs a chance to note the mark via its optional hook */
	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun /**
1192*4882a593Smuzhiyun  * fscache_mark_pages_cached - Mark pages as being cached
1193*4882a593Smuzhiyun  * @op: The retrieval op pages are being marked for
1194*4882a593Smuzhiyun  * @pagevec: The pages to be marked
1195*4882a593Smuzhiyun  *
1196*4882a593Smuzhiyun  * Mark a bunch of netfs pages as being cached.  After this is called,
1197*4882a593Smuzhiyun  * the netfs must call fscache_uncache_page() to remove the mark.
1198*4882a593Smuzhiyun  */
fscache_mark_pages_cached(struct fscache_retrieval * op,struct pagevec * pagevec)1199*4882a593Smuzhiyun void fscache_mark_pages_cached(struct fscache_retrieval *op,
1200*4882a593Smuzhiyun 			       struct pagevec *pagevec)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	unsigned long loop;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	for (loop = 0; loop < pagevec->nr; loop++)
1205*4882a593Smuzhiyun 		fscache_mark_page_cached(op, pagevec->pages[loop]);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	pagevec_reinit(pagevec);
1208*4882a593Smuzhiyun }
1209*4882a593Smuzhiyun EXPORT_SYMBOL(fscache_mark_pages_cached);
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun /*
1212*4882a593Smuzhiyun  * Uncache all the pages in an inode that are marked PG_fscache, assuming them
1213*4882a593Smuzhiyun  * to be associated with the given cookie.
1214*4882a593Smuzhiyun  */
__fscache_uncache_all_inode_pages(struct fscache_cookie * cookie,struct inode * inode)1215*4882a593Smuzhiyun void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
1216*4882a593Smuzhiyun 				       struct inode *inode)
1217*4882a593Smuzhiyun {
1218*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
1219*4882a593Smuzhiyun 	struct pagevec pvec;
1220*4882a593Smuzhiyun 	pgoff_t next;
1221*4882a593Smuzhiyun 	int i;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	_enter("%p,%p", cookie, inode);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	if (!mapping || mapping->nrpages == 0) {
1226*4882a593Smuzhiyun 		_leave(" [no pages]");
1227*4882a593Smuzhiyun 		return;
1228*4882a593Smuzhiyun 	}
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	pagevec_init(&pvec);
1231*4882a593Smuzhiyun 	next = 0;
1232*4882a593Smuzhiyun 	do {
1233*4882a593Smuzhiyun 		if (!pagevec_lookup(&pvec, mapping, &next))
1234*4882a593Smuzhiyun 			break;
1235*4882a593Smuzhiyun 		for (i = 0; i < pagevec_count(&pvec); i++) {
1236*4882a593Smuzhiyun 			struct page *page = pvec.pages[i];
1237*4882a593Smuzhiyun 			if (PageFsCache(page)) {
1238*4882a593Smuzhiyun 				__fscache_wait_on_page_write(cookie, page);
1239*4882a593Smuzhiyun 				__fscache_uncache_page(cookie, page);
1240*4882a593Smuzhiyun 			}
1241*4882a593Smuzhiyun 		}
1242*4882a593Smuzhiyun 		pagevec_release(&pvec);
1243*4882a593Smuzhiyun 		cond_resched();
1244*4882a593Smuzhiyun 	} while (next);
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	_leave("");
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
1249