// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * User extended attribute client side cache functions.
 *
 * Author: Frank van der Linden <fllinden@amazon.com>
 */
#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <uapi/linux/xattr.h>

#include "nfs4_fs.h"
#include "internal.h"

/*
 * User extended attributes client side caching is implemented by having
 * a cache structure attached to NFS inodes. This structure is allocated
 * when needed, and freed when the cache is zapped.
 *
 * The cache structure contains a hash table of entries, and a pointer
 * to a special-cased entry for the listxattr cache.
 *
 * Accessing and allocating / freeing the caches is done via reference
 * counting. The cache entries use a similar refcounting scheme.
 *
 * This makes freeing a cache, both from the shrinker and from the
 * zap cache path, easy. It also means that, in current use cases,
 * the large majority of inodes will not waste any memory, as they
 * will never have any user extended attributes assigned to them.
 *
 * Attribute entries are hashed into a simple hash table. They are
 * also part of an LRU.
 *
 * There are three shrinkers.
 *
 * Two shrinkers deal with the cache entries themselves: one for
 * large entries (> PAGE_SIZE), and one for smaller entries. The
 * shrinker for the larger entries works more aggressively than
 * the one for the smaller entries.
 *
 * The other shrinker frees the cache structures themselves.
 */
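
/*
 * A minimal sketch of the refcounted access pattern described above
 * (hypothetical read-side caller, not code from this file). Both the
 * cache and the entry are taken and dropped by reference:
 *
 *	cache = nfs4_xattr_get_cache(inode, 0);
 *	if (cache != NULL) {
 *		entry = nfs4_xattr_hash_find(cache, name);
 *		if (entry != NULL) {
 *			... use entry->xattr_value ...
 *			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
 *		}
 *		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
 *	}
 */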

/*
 * 64 buckets is a good default. It is unlikely that any reasonable
 * workload uses even 64 user extended attributes. You can certainly
 * add a lot more - but you get what you ask for in those
 * circumstances.
 */
#define NFS4_XATTR_HASH_SIZE 64

#define NFSDBG_FACILITY NFSDBG_XATTRCACHE

struct nfs4_xattr_cache;
struct nfs4_xattr_entry;

struct nfs4_xattr_bucket {
	spinlock_t lock;
	struct hlist_head hlist;
	struct nfs4_xattr_cache *cache;
	bool draining;
};

struct nfs4_xattr_cache {
	struct kref ref;
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;
	struct list_head dispose;
	atomic_long_t nent;		/* number of hashed entries */
	spinlock_t listxattr_lock;
	struct inode *inode;
	struct nfs4_xattr_entry *listxattr;
};

struct nfs4_xattr_entry {
	struct kref ref;
	struct hlist_node hnode;
	struct list_head lru;
	struct list_head dispose;	/* shrinker isolate list linkage */
	char *xattr_name;		/* NULL for the listxattr entry */
	void *xattr_value;
	size_t xattr_size;
	struct nfs4_xattr_bucket *bucket;
	uint32_t flags;
};

/* Value was too large to fit inline; it was kvmalloc'ed separately. */
#define NFS4_XATTR_ENTRY_EXTVAL 0x0001

/*
 * LRU list of NFS inodes that have xattr caches.
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static struct kmem_cache *nfs4_xattr_cache_cachep;

/*
 * Hashing helper functions.
 */
static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&cache->buckets[i].hlist);
		spin_lock_init(&cache->buckets[i].lock);
		cache->buckets[i].cache = cache;
		cache->buckets[i].draining = false;
	}
}

/*
 * Locking order:
 * 1. inode i_lock or bucket lock
 * 2. list_lru lock (taken by list_lru_* functions)
 */
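
/*
 * A condensed sketch of the nesting, as used by nfs4_xattr_hash_remove()
 * below: the bucket lock is taken first, and the list_lru lock nests
 * inside it via the LRU wrapper:
 *
 *	spin_lock(&bucket->lock);
 *	nfs4_xattr_entry_lru_del(entry);
 *	spin_unlock(&bucket->lock);
 */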

/*
 * Wrapper functions to add a cache entry to the right LRU.
 */
static bool
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}

static bool
nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}

/*
 * This function allocates cache entries. They are the normal
 * extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry so that they can be
 * treated as one by the memory shrinker.
 *
 * xattr cache entries are allocated together with names. If the
 * value fits into one page with the entry structure and the name,
 * it will also be part of the same allocation (kmalloc). This is
 * expected to be the vast majority of cases. Larger allocations
 * have a value pointer that is allocated separately by kvmalloc.
 *
 * Parameters:
 *
 * @name:  Name of the extended attribute. NULL for listxattr cache
 *         entry.
 * @value: Value of attribute, or listxattr cache. NULL if the
 *         value is to be copied from pages instead.
 * @pages: Pages to copy the value from, if not NULL. Passed in to
 *         make it easier to copy the value after an RPC, even if
 *         the value will not be passed up to application (e.g.
 *         for a 'query' getxattr with NULL buffer).
 * @len:   Length of the value. Can be 0 for zero-length attributes.
 *         @value and @pages will be NULL if @len is 0.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	void *valp;
	char *namep;
	size_t alloclen, slen;
	char *buf;
	uint32_t flags;

	BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
	    XATTR_NAME_MAX + 1 > PAGE_SIZE);

	alloclen = sizeof(struct nfs4_xattr_entry);
	if (name != NULL) {
		slen = strlen(name) + 1;
		alloclen += slen;
	} else
		slen = 0;

	if (alloclen + len <= PAGE_SIZE) {
		alloclen += len;
		flags = 0;
	} else {
		flags = NFS4_XATTR_ENTRY_EXTVAL;
	}

	buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (buf == NULL)
		return NULL;
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	} else {
		namep = NULL;
	}

	if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
		valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
		if (valp == NULL) {
			kfree(buf);
			return NULL;
		}
	} else if (len != 0) {
		valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
	} else
		valp = NULL;

	if (valp != NULL) {
		if (value != NULL)
			memcpy(valp, value, len);
		else
			_copy_from_pages(valp, pages, 0, len);
	}

	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}
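
/*
 * Illustration of the two layouts produced above (a sketch; padding
 * not shown):
 *
 *	inline value (flags == 0), a single kmalloc:
 *	  [ struct nfs4_xattr_entry | name\0 | value ]
 *
 *	large value (NFS4_XATTR_ENTRY_EXTVAL), two allocations:
 *	  [ struct nfs4_xattr_entry | name\0 ]	(kmalloc)
 *	  [ value ]				(separate kvmalloc)
 */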

static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}

static void
nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);

	if (WARN_ON(!list_empty(&entry->lru)))
		return;

	nfs4_xattr_free_entry(entry);
}

static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
	struct nfs4_xattr_cache *cache;
	int i;

	cache = container_of(kref, struct nfs4_xattr_cache, ref);

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
			return;
		cache->buckets[i].draining = false;
	}

	cache->listxattr = NULL;

	kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}

static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
	struct nfs4_xattr_cache *cache;

	cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
	    GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (cache == NULL)
		return NULL;

	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);

	return cache;
}

/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
 */
static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}
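
/*
 * For example, nfs4_xattr_discard_cache() below installs the sentinel
 * with:
 *
 *	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));
 *
 * after which a racing nfs4_xattr_set_listcache(cache, entry) returns 0,
 * and its caller drops the new entry instead of installing it into the
 * now-stale cache.
 */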

/*
 * Unlink a cache from its parent inode, clearing out an invalid
 * cache. Must be called with i_lock held.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *oldcache;

	nfsi = NFS_I(inode);

	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

	return oldcache;
}

/*
 * Discard a cache. Called by get_cache() if there was an old,
 * invalid cache. Can also be called from a shrinker callback.
 *
 * The cache is dead, it has already been unlinked from its inode,
 * and no longer appears on the cache LRU list.
 *
 * Mark all buckets as draining, so that no new entries are added. This
 * could still happen in the unlikely, but possible case that another
 * thread had grabbed a reference before it was unlinked from the inode,
 * and is still holding it for an add operation.
 *
 * Remove all entries from the LRU lists, so that there is no longer
 * any way to 'find' this cache. Then, remove the entries from the hash
 * table.
 *
 * At that point, the cache will remain empty and can be freed when the final
 * reference drops, which is very likely the kref_put at the end of
 * this function, or the one called immediately afterwards in the
 * shrinker callback.
 */
static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Get a referenced copy of the cache structure. Avoid doing allocs
 * while holding i_lock, which means that we do some optimistic
 * allocation, and might have to free the result in rare cases.
 *
 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
 * and acts accordingly, replacing the cache when needed. For the read case
 * (!add), this means that the caller must make sure that the cache
 * is valid before calling this function. getxattr and listxattr call
 * revalidate_inode to do this. The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * call.
 */

static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *cache, *oldcache, *newcache;

	nfsi = NFS_I(inode);

	cache = oldcache = NULL;

	spin_lock(&inode->i_lock);

	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		oldcache = nfs4_xattr_cache_unlink(inode);
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	if (add && cache == NULL) {
		newcache = NULL;

		cache = nfs4_xattr_alloc_cache();
		if (cache == NULL)
			goto out;

		spin_lock(&inode->i_lock);
		if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
			/*
			 * The cache was invalidated again. Give up,
			 * since what we want to enter is now likely
			 * outdated anyway.
			 */
			spin_unlock(&inode->i_lock);
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = NULL;
			goto out;
		}

		/*
		 * Check if someone beat us to it.
		 */
		if (nfsi->xattr_cache != NULL) {
			newcache = nfsi->xattr_cache;
			kref_get(&newcache->ref);
		} else {
			kref_get(&cache->ref);
			nfsi->xattr_cache = cache;
			cache->inode = inode;
			list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
		}

		spin_unlock(&inode->i_lock);

		/*
		 * If there was a race, throw away the cache we just
		 * allocated, and use the new one allocated by someone
		 * else.
		 */
		if (newcache != NULL) {
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = newcache;
		}
	}

out:
	/*
	 * Discard the now orphaned old cache.
	 */
	if (oldcache != NULL)
		nfs4_xattr_discard_cache(oldcache);

	return cache;
}
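
/*
 * The race handling above is a standard optimistic-allocation pattern.
 * Condensed (a sketch; the invalidation re-check and reference counts
 * are simplified):
 *
 *	new = nfs4_xattr_alloc_cache();			(no locks held)
 *	spin_lock(&inode->i_lock);
 *	if (nfsi->xattr_cache != NULL)
 *		winner = nfsi->xattr_cache;		(lost the race)
 *	else
 *		nfsi->xattr_cache = new;		(won the race)
 *	spin_unlock(&inode->i_lock);
 *	if (winner != NULL)
 *		kref_put(&new->ref, nfs4_xattr_free_cache_cb);
 */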

static inline struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}
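
/*
 * Example: with NFS4_XATTR_HASH_SIZE == 64 (a power of two), the bucket
 * index is simply the low six bits of the Jenkins hash of the name:
 *
 *	idx = jhash("user.foo", strlen("user.foo"), 0) & 63;
 */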

static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry;

	entry = NULL;

	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}

	return entry;
}

static int
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *oldentry = NULL;
	int ret = 1;

	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);

	if (bucket->draining) {
		ret = 0;
		goto out;
	}

	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL) {
		hlist_del_init(&oldentry->hnode);
		nfs4_xattr_entry_lru_del(oldentry);
	} else {
		atomic_long_inc(&cache->nent);
	}

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);

out:
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);

	return ret;
}

static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}

	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}

static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);

	spin_unlock(&bucket->lock);

	return entry;
}

/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			     ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	ret = 0;
	entry = nfs4_xattr_hash_find(cache, name);

	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
		    entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size) {
			ret = -ERANGE;
		} else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}
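
/*
 * A hypothetical caller sketch (the real call sites live in the NFSv4.2
 * xattr handlers, not in this file; do_getxattr_rpc is an assumed
 * helper): try the cache first, fall back to an RPC on a miss, then
 * populate the cache with the result.
 *
 *	ret = nfs4_xattr_cache_get(inode, name, buf, buflen);
 *	if (ret == -ENOENT) {
 *		ret = do_getxattr_rpc(inode, name, buf, buflen);
 *		if (ret >= 0)
 *			nfs4_xattr_cache_add(inode, name, buf, NULL, ret);
 *	}
 */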

/*
 * Retrieve a cached list of xattrs from the cache.
 */
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);

	entry = cache->listxattr;

	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen) {
			ret = -ERANGE;
		} else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}

	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}

/*
 * Add an xattr to the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	dprintk("%s: add '%s' len %lu\n", __func__,
	    name, (unsigned long)buflen);

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;

	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Remove an xattr from the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	struct nfs4_xattr_cache *cache;

	dprintk("%s: remove '%s'\n", __func__, name);

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return;

	(void)nfs4_xattr_set_listcache(cache, NULL);
	nfs4_xattr_hash_remove(cache, name);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Cache listxattr output, replacing any possible old one.
 */
void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
			       ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is obviously the same for all buckets, so just
	 * use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}

/*
 * Zap the entire cache. Called when an inode is evicted.
 */
void nfs4_xattr_cache_zap(struct inode *inode)
{
	struct nfs4_xattr_cache *oldcache;

	spin_lock(&inode->i_lock);
	oldcache = nfs4_xattr_cache_unlink(inode);
	spin_unlock(&inode->i_lock);

	if (oldcache)
		nfs4_xattr_discard_cache(oldcache);
}

/*
 * The entry LRU is shrunk more aggressively than the cache LRU,
 * by setting @seeks to 1.
 *
 * Cache structures are freed only when they've become empty, after
 * pruning all but one entry.
 */

static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects = nfs4_xattr_cache_count,
	.scan_objects = nfs4_xattr_cache_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects = nfs4_xattr_entry_count,
	.scan_objects = nfs4_xattr_entry_scan,
	.seeks = 1,
	.batch = 512,
	.flags = SHRINKER_MEMCG_AWARE,
};
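
/*
 * Note on the tuning above: the VM divides a shrinker's scan target by
 * its @seeks value, so with .seeks = 1 the large-entry LRU is pressed
 * roughly twice (DEFAULT_SEEKS / 1) as hard as the other two under the
 * same memory pressure.
 */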

static enum lru_status
cache_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct inode *inode;
	struct nfs4_xattr_cache *cache = container_of(item,
	    struct nfs4_xattr_cache, lru);

	if (atomic_long_read(&cache->nent) > 1)
		return LRU_SKIP;

	/*
	 * If a cache structure is on the LRU list, we know that
	 * its inode is valid. Try to lock it to break the link.
	 * Since we're inverting the lock order here, only try.
	 */
	inode = cache->inode;

	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	kref_get(&cache->ref);

	cache->inode = NULL;
	NFS_I(inode)->xattr_cache = NULL;
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
	list_lru_isolate(lru, &cache->lru);

	spin_unlock(&inode->i_lock);

	list_add_tail(&cache->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_cache *cache;

	freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
	    cache_lru_isolate, &dispose);
	while (!list_empty(&dispose)) {
		cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
		    dispose);
		list_del_init(&cache->dispose);
		nfs4_xattr_discard_cache(cache);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	count = list_lru_shrink_count(&nfs4_xattr_cache_lru, sc);
	return vfs_pressure_ratio(count);
}

static enum lru_status
entry_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);

	bucket = entry->bucket;
	cache = bucket->cache;

	/*
	 * Unhook the entry from its parent (either a cache bucket
	 * or a cache structure if it's a listxattr buf), so that
	 * it's no longer found. Then add it to the isolate list,
	 * to be freed later.
	 *
	 * In both cases, we're inverting lock order, so use
	 * trylock and skip the entry if we can't get the lock.
	 */
	if (entry->xattr_name != NULL) {
		/* Regular cache entry */
		if (!spin_trylock(&bucket->lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		hlist_del_init(&entry->hnode);
		atomic_long_dec(&cache->nent);
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&bucket->lock);
	} else {
		/* Listxattr cache entry */
		if (!spin_trylock(&cache->listxattr_lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		cache->listxattr = NULL;
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&cache->listxattr_lock);
	}

	list_add_tail(&entry->dispose, dispose);
	return LRU_REMOVED;
}

static unsigned long
nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_entry *entry;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);

	while (!list_empty(&dispose)) {
		entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
		    dispose);
		list_del_init(&entry->dispose);

		/*
		 * Drop two references: the one that we just grabbed
		 * in entry_lru_isolate, and the one that was set
		 * when the entry was first allocated.
		 */
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	count = list_lru_shrink_count(lru, sc);
	return vfs_pressure_ratio(count);
}

static void nfs4_xattr_cache_init_once(void *p)
{
	struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;

	spin_lock_init(&cache->listxattr_lock);
	atomic_long_set(&cache->nent, 0);
	nfs4_xattr_hash_init(cache);
	cache->listxattr = NULL;
	INIT_LIST_HEAD(&cache->lru);
	INIT_LIST_HEAD(&cache->dispose);
}

int __init nfs4_xattr_cache_init(void)
{
	int ret = 0;

	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
	    sizeof(struct nfs4_xattr_cache), 0,
	    (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
	    nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;

	ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
	    &nfs4_xattr_large_entry_shrinker);
	if (ret)
		goto out4;

	ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
	    &nfs4_xattr_entry_shrinker);
	if (ret)
		goto out3;

	ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
	    &nfs4_xattr_cache_shrinker);
	if (ret)
		goto out2;

	ret = register_shrinker(&nfs4_xattr_cache_shrinker);
	if (ret)
		goto out1;

	ret = register_shrinker(&nfs4_xattr_entry_shrinker);
	if (ret)
		goto out;

	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
	if (!ret)
		return 0;

	unregister_shrinker(&nfs4_xattr_entry_shrinker);
out:
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
out1:
	list_lru_destroy(&nfs4_xattr_cache_lru);
out2:
	list_lru_destroy(&nfs4_xattr_entry_lru);
out3:
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
out4:
	kmem_cache_destroy(nfs4_xattr_cache_cachep);

	return ret;
}

void nfs4_xattr_cache_exit(void)
{
	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
	list_lru_destroy(&nfs4_xattr_entry_lru);
	list_lru_destroy(&nfs4_xattr_cache_lru);
	kmem_cache_destroy(nfs4_xattr_cache_cachep);
}