// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

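/*
 * The cache is partitioned into buckets, each with its own rbtree for
 * lookups, LRU list for aging, and lock, so that activity in one
 * bucket does not contend with activity in another.
 */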
32*4882a593Smuzhiyun struct nfsd_drc_bucket {
33*4882a593Smuzhiyun struct rb_root rb_head;
34*4882a593Smuzhiyun struct list_head lru_head;
35*4882a593Smuzhiyun spinlock_t cache_lock;
36*4882a593Smuzhiyun };
37*4882a593Smuzhiyun
static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}
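
/*
 * A worked example, assuming 4KB pages (PAGE_SHIFT == 12): with 1GB of
 * low memory, low_pages = 262144, int_sqrt(262144) = 512, and so
 * limit = (16 * 512) << 2 = 32768, matching the 1GB row in the table
 * above. The 256k hard cap is reached at roughly 64GB of low memory.
 */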

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
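
/*
 * Continuing the worked example above: a 32768-entry limit yields
 * 32768 / 64 = 512 buckets (already a power of two), i.e. 9 mask bits.
 */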

static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
        return hash_32(be32_to_cpu(xid), nn->maskbits);
}

static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                       struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

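/*
 * Callers must hold b->cache_lock. An entry that was never inserted
 * (c_state == RC_UNUSED) is on no rbtree or LRU list, which is why
 * nfsd_cache_lookup() can pass b == NULL when freeing a preallocated
 * entry that lost the insertion race.
 */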
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                             struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nn->drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nn->drc_mem_usage -= sizeof(*rp);
        }
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                      struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

int nfsd_drc_slab_create(void)
{
        drc_slab = kmem_cache_create("nfsd_drc",
                                     sizeof(struct svc_cacherep), 0, 0, NULL);
        return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
        kmem_cache_destroy(drc_slab);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                goto out_nomem;

        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                   sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
                goto out_shrinker;

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;

                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                     rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of its bucket's LRU list: entries age
 * out from the front, so the most recently touched entry lives longest.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

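/* Caller must hold b->cache_lock. */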
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                freed++;
        }
        return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes.
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}
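
/*
 * Note that only the head and page data are summed, and only up to
 * RC_CSUMLEN bytes of them; requests that differ solely beyond that
 * prefix produce the same checksum and are distinguished (if at all)
 * by the total argument length in the cache key.
 */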
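
/*
 * A match on the XID but a mismatch on the payload checksum usually
 * means the client reused an XID for a different request; count it
 * and emit a trace event before falling through to the full key
 * comparison.
 */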
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                   const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum) {
                ++nn->payload_misses;
                trace_nfsd_drc_mismatch(nn, key, rp);
        }

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry on a
 * hit; otherwise links the given key into the tree and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                  struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a miss followed by an insert, an entry is
 * preallocated before the bucket lock is taken and inserted in one
 * pass; if the search finds an existing match instead, the
 * preallocated entry is freed.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        __wsum csum;
        u32 hash = nfsd_cache_hash(xid, nn);
        struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                goto out;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp)
                goto out;

        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp) {
                nfsd_reply_cache_free_locked(NULL, rp, nn);
                rp = found;
                goto found_entry;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nn->drc_mem_usage += sizeof(*rp);

        /* go ahead and prune the cache */
        prune_bucket(b, nn);

out_unlock:
        spin_unlock(&b->cache_lock);
out:
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsdstats.rchits++;
        rtn = RC_DROPIT;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out_trace;

        /*
         * From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache?
         */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out_trace;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }

out_trace:
        trace_nfsd_drc_found(nn, rqstp, rtn);
        goto out_unlock;
}
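
/*
 * Entry lifecycle, for reference: entries are allocated RC_UNUSED,
 * become RC_INPROG once nfsd_cache_lookup() inserts them, and are
 * marked RC_DONE by nfsd_cache_update() when a reply has been cached.
 * Only RC_DONE entries can answer a retransmission from the cache.
 */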

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        u32 hash;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
        b = &nn->drc_hashtbl[hash];

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nn->drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                       data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = m->private;

        seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                   atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage:             %u\n", nn->drc_mem_usage);
        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses:        %u\n", nn->payload_misses);
        seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                          nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}