// SPDX-License-Identifier: GPL-2.0-or-later
/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake-up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_page_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->page, key->bit_nr);

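	/* the waitqueue is shared with other pages, so ignore wake-ups for
	 * pages other than ours and for bits other than the lock bit */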
	if (key->page != page || key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	fscache_enqueue_retrieval(op);
	spin_unlock(&object->work_lock);

	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

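	/* if we can lock the page, no read is in flight and we can examine
	 * its state directly; if we can't, someone else holds the lock (most
	 * likely a read is in progress) and the monitor we just installed
	 * will fire when the page is unlocked */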
	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
discard:
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

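	/* transfer no more than eight pages per pass so that this work item
	 * doesn't monopolise the thread pool (see the resched check at the
	 * bottom of the loop) */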
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
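	/* the monitor takes its own ref on the operation; it's dropped by
	 * cachefiles_read_copier() or on the error paths below */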
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block;
	unsigned shift;
	int ret, ret2;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block = page->index;
	block <<= shift;

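	/* bmap() maps the block in place; it leaves the block number as zero
	 * if the page sits over a hole, i.e. there's no data stored for it */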
	ret2 = bmap(inode, &block);
	ASSERT(ret2 == 0);

	_debug("%llx -> %llx",
	       (unsigned long long) (page->index << shift),
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

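	/* the default result covers pages with no backing block: -ENODATA if
	 * we have space to allocate them, -ENOBUFS if we don't */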
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block = page->index;
		block <<= shift;

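		/* as above, bmap() leaves the block number as zero if the
		 * page sits over a hole in the backing file */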
		ret2 = bmap(inode, &block);
		ASSERT(ret2 == 0);

		_debug("%llx -> %llx",
		       (unsigned long long) (page->index << shift),
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
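	/* only trim the write if the store limit isn't page-aligned and this
	 * page is the one that straddles it */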
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

	data = kmap(page);
	ret = kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}