// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"

#define CACHEFILES_KEYBUF_SIZE 512

/*
 * dump debugging info about an object
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix)
{
	struct fscache_cookie *cookie;
	const u8 *k;
	unsigned loop;

	pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
	pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	pr_err("%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	pr_err("%sparent=%p\n",
	       prefix, object->fscache.parent);

	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		pr_err("%skey=[%u] '", prefix, cookie->key_len);
		k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
			cookie->inline_key : cookie->key;
		for (loop = 0; loop < cookie->key_len; loop++)
			pr_cont("%02x", k[loop]);
		pr_cont("'\n");
	} else {
		pr_err("%scookie=NULL\n", prefix);
	}
	spin_unlock(&object->fscache.lock);
}

/*
 * dump debugging info about a pair of objects
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	if (object)
		__cachefiles_printk_object(object, "");
	if (xobject)
		__cachefiles_printk_object(xobject, "x");
}

/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry,
					  enum fscache_why_object_killed why)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%pd'", dentry);

	write_lock(&cache->active_lock);

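	/* The active_nodes tree is keyed on the backing dentry pointer, so a
	 * simple pointer comparison is enough to find the owning object, if
	 * there is one.
	 */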
	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	write_unlock(&cache->active_lock);
	trace_cachefiles_mark_buried(NULL, dentry, why);
	_leave(" [no owner]");
	return;

	/* found the dentry for the object */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       object->fscache.state->name,
	       dentry);

	trace_cachefiles_mark_buried(object, dentry, why);

	if (fscache_object_is_live(&object->fscache)) {
		pr_err("\n");
		pr_err("Error: Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else {
		if (why != FSCACHE_OBJECT_IS_STALE)
			fscache_object_mark_killed(&object->fscache, why);
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}

/*
 * record the fact that an object is now active
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	dentry = object->dentry;
	trace_cachefiles_mark_active(object, dentry);

	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		pr_err("Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

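	/* Insert into the active tree, which is keyed on the backing dentry
	 * pointer.  A collision means an old object for the same backing file
	 * is still lingering from a previous incarnation.
	 */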
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	trace_cachefiles_wait_active(object, dentry, xobject);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);

	if (fscache_object_is_live(&xobject->fscache)) {
		pr_err("\n");
		pr_err("Error: Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
	}
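	/* Pin the old object so that it can't be destroyed while we wait for
	 * it outside of the lock.
	 */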
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_entry_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			pr_err("\n");
			pr_err("Error: Overlong wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	cache->cache.ops->put_object(&xobject->fscache,
		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
	goto try_again;

requeue:
	cache->cache.ops->put_object(&xobject->fscache,
		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}

/*
 * Mark an object as being inactive.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
				     struct cachefiles_object *object,
				     blkcnt_t i_blocks)
{
	struct dentry *dentry = object->dentry;
	struct inode *inode = d_backing_inode(dentry);

	trace_cachefiles_mark_inactive(object, dentry, inode);

	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	write_unlock(&cache->active_lock);

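	/* Wake anyone in cachefiles_mark_object_active() who is waiting for
	 * this slot to come free.
	 */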
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

	/* This object can now be culled, so we need to let the daemon know
	 * that there is something it can remove if it needs to.
	 */
	atomic_long_add(i_blocks, &cache->b_released);
	if (atomic_inc_return(&cache->f_released))
		cachefiles_state_changed(cache);
}

/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct cachefiles_object *object,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive,
				  enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			trace_cachefiles_unlink(object, rep, why);
			ret = vfs_unlink(d_inode(dir), rep, NULL);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep, why);
		}

		inode_unlock(d_inode(dir));

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
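	/* The grave name is sixteen hex digits: the current time in seconds
	 * plus a per-cache counter, so names stay unique even within a single
	 * second.
	 */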
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* lock both directories for rename; lock_rename() returns whichever
	 * of them is an ancestor of the other (if either is), and that
	 * ancestor must turn out to be neither the victim nor the grave */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		trace_cachefiles_rename(object, rep, grave, why);
		ret = vfs_rename(d_inode(dir), rep,
				 d_inode(cache->graveyard), grave, NULL, 0);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep, why);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * delete an object representation from the cache
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(d_backing_inode(object->dentry));
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		inode_unlock(d_inode(dir));
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			ret = cachefiles_bury_object(cache, object, dir,
						     object->dentry, false,
						     FSCACHE_OBJECT_WAS_RETIRED);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			inode_unlock(d_inode(dir));
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct inode *inode;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(d_backing_inode(parent->dentry));

	if (!(d_is_dir(parent->dentry))) {
		// TODO: convert file to dir
		_leave("looking up in non-directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

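	/* The key is a sequence of NUL-terminated path components, terminated
	 * as a whole by a second NUL.  Each pass through 'advance' consumes
	 * one component; when the following component is empty we've reached
	 * the leaf and key is set to NULL.
	 */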
advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next)) {
		trace_cachefiles_lookup(object, next, NULL);
		goto lookup_error;
	}

	inode = d_backing_inode(next);
	trace_cachefiles_lookup(object, next, inode);
	_debug("next -> %p %s", next, inode ? "positive" : "negative");

	if (!key)
		object->new = !inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (d_is_negative(next))
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(d_inode(dir), next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (!key)
				trace_cachefiles_mkdir(object, next, ret);
			if (ret < 0)
				goto create_error;

			if (unlikely(d_unhashed(next))) {
				dput(next);
				inode_unlock(d_inode(dir));
				goto lookup_again;
			}
			ASSERT(d_backing_inode(next));

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next),
			       d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next)) {
			pr_err("inode %lu is not a directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(d_inode(dir), next, S_IFREG, true);
			cachefiles_hist(cachefiles_create_histogram, start);
			trace_cachefiles_create(object, next, ret);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("create -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next),
			       d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next) &&
			   !d_is_reg(next)
			   ) {
			pr_err("inode %lu is not a file or directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		inode_unlock(d_inode(dir));
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%pd'", next);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, object, dir, next,
						     true,
						     FSCACHE_OBJECT_IS_STALE);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			fscache_object_retrying_stale(&object->fscache);
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	inode_unlock(d_inode(dir));
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		path.dentry = next;
		touch_atime(&path);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (d_is_reg(object->dentry)) {
			const struct address_space_operations *aops;

			ret = -EPERM;
			aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
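			/* The backing filesystem must provide bmap(), which
			 * the read paths use to check for the presence of
			 * data, and its block size must not exceed a page.
			 */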
			if (!aops->bmap)
				goto check_error;
			if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
	return 0;

no_space_error:
	fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	cachefiles_mark_object_inactive(
		cache, object, d_backing_inode(object->dentry)->i_blocks);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	inode_unlock(d_inode(dir));
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock(d_inode(dir));

retry:
	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(d_inode(dir), subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		if (unlikely(d_unhashed(subdir))) {
			dput(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       d_backing_inode(subdir),
		       d_backing_inode(subdir)->i_ino);
	}

	inode_unlock(d_inode(dir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

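	/* the backing filesystem must support xattrs and the full set of
	 * directory operations that cachefiles relies on */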
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->create ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * find out if an object is in use or not
 * - if finds object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%pd/,%s",
	//       dir, filename);

	/* look up the victim */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, d_backing_inode(victim) ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (d_is_negative(victim)) {
		inode_unlock(d_inode(dir));
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object */
	read_lock(&cache->active_lock);

	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	//_leave(" = %p", victim);
	return victim;

object_in_use:
	read_unlock(&cache->active_lock);
	inode_unlock(d_inode(dir));
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	_debug("victim -> %p %s",
	       victim, d_backing_inode(victim) ? "positive" : "negative");

	/* okay... the victim is not being used so we can cull it
	 * - start by marking it as stale
	 */
	_debug("victim is cullable");

	ret = cachefiles_remove_object_xattr(cache, victim);
	if (ret < 0)
		goto error_unlock;

	/* actually remove the victim (drops the dir mutex) */
	_debug("bury");

	ret = cachefiles_bury_object(cache, NULL, dir, victim, false,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return -ESTALE;
	}

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;

	//_enter(",%pd/,%s",
	//       dir, filename);

	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	inode_unlock(d_inode(dir));
	dput(victim);
	//_leave(" = 0");
	return 0;
}