/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table. In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);
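/*
 * Note: the wake-ups for this queue come from nfs4_put_stid() and
 * put_ol_stateid_locked() below, which call wake_up_all(&close_wq)
 * whenever a stateid reference is dropped but the stateid survives.
 */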

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
			struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
			struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
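/*
 * For illustration: opaque_hashval() is a plain multiply-by-37 rolling
 * hash, so the two bytes "ab" hash to 'a' * 37 + 'b' == 3687; callers
 * below mask the result down to their table size (e.g. OWNER_HASH_MASK).
 */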

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
	struct nfsd_file *ret = NULL;

	spin_lock(&f->fi_lock);
	if (f->fi_deleg_file)
		ret = nfsd_file_get(f->fi_deleg_file);
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1. But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected. Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	WARN_ON(!list_empty(&stid->sc_cp_list));
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access. Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
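/*
 * Worked example (hash value assumed for illustration): if jhash()
 * maps a filehandle to 0x00a1b2c3, then bits 0xc3, 0xb2 and 0xa1 are
 * set in the "new" filter by block_delegations(), and
 * delegation_blocked() reports a hit only when all three bits are set
 * in the same filter.
 */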

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1 - bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash & 255, bd->set[0]) &&
	    test_bit((hash >> 8) & 255, bd->set[0]) &&
	    test_bit((hash >> 16) & 255, bd->set[0]))
		return 1;

	if (test_bit(hash & 255, bd->set[1]) &&
	    test_bit((hash >> 8) & 255, bd->set[1]) &&
	    test_bit((hash >> 16) & 255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash & 255, bd->set[bd->new]);
	__set_bit((hash >> 8) & 255, bd->set[bd->new]);
	__set_bit((hash >> 16) & 255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented. The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */

static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used. This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
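/*
 * Example: NFS4_SHARE_ACCESS_READ is 1, _WRITE is 2, _BOTH is 3, so an
 * open for read followed by an open for both leaves bits 1 and 3 set
 * in st_access_bmap; bmap_to_share_mode() below folds that back to
 * NFS4_SHARE_ACCESS_BOTH (1 | 3 == 3).
 */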
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	WARN_ON(!list_empty(&stid->sc_cp_list));
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time. Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes. In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
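/* That is 80 bytes, which slot_bytes() below deducts from maxresp_cached. */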

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow. That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller. This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
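/*
 * Example with assumed numbers: with 16 nfsd threads, scale_factor is
 * 16, so a new session may claim at most 1/16 of the remaining DRC
 * memory, but never less than one slot's worth.
 */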
1686
nfsd4_put_drc_mem(struct nfsd4_channel_attrs * ca)1687 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1688 {
1689 int slotsize = slot_bytes(ca);
1690
1691 spin_lock(&nfsd_drc_lock);
1692 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1693 spin_unlock(&nfsd_drc_lock);
1694 }
1695
alloc_session(struct nfsd4_channel_attrs * fattrs,struct nfsd4_channel_attrs * battrs)1696 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1697 struct nfsd4_channel_attrs *battrs)
1698 {
1699 int numslots = fattrs->maxreqs;
1700 int slotsize = slot_bytes(fattrs);
1701 struct nfsd4_session *new;
1702 int mem, i;
1703
1704 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1705 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1706 mem = numslots * sizeof(struct nfsd4_slot *);
1707
1708 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1709 if (!new)
1710 return NULL;
1711 /* allocate each struct nfsd4_slot and data cache in one piece */
1712 for (i = 0; i < numslots; i++) {
1713 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1714 if (!new->se_slots[i])
1715 goto out_free;
1716 }
1717
1718 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1719 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1720
1721 return new;
1722 out_free:
1723 while (i--)
1724 kfree(new->se_slots[i]);
1725 kfree(new);
1726 return NULL;
1727 }
1728
free_conn(struct nfsd4_conn * c)1729 static void free_conn(struct nfsd4_conn *c)
1730 {
1731 svc_xprt_put(c->cn_xprt);
1732 kfree(c);
1733 }
1734
nfsd4_conn_lost(struct svc_xpt_user * u)1735 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1736 {
1737 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1738 struct nfs4_client *clp = c->cn_session->se_client;
1739
1740 spin_lock(&clp->cl_lock);
1741 if (!list_empty(&c->cn_persession)) {
1742 list_del(&c->cn_persession);
1743 free_conn(c);
1744 }
1745 nfsd4_probe_callback(clp);
1746 spin_unlock(&clp->cl_lock);
1747 }
1748
alloc_conn(struct svc_rqst * rqstp,u32 flags)1749 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1750 {
1751 struct nfsd4_conn *conn;
1752
1753 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1754 if (!conn)
1755 return NULL;
1756 svc_xprt_get(rqstp->rq_xprt);
1757 conn->cn_xprt = rqstp->rq_xprt;
1758 conn->cn_flags = flags;
1759 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1760 return conn;
1761 }
1762
__nfsd4_hash_conn(struct nfsd4_conn * conn,struct nfsd4_session * ses)1763 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1764 {
1765 conn->cn_session = ses;
1766 list_add(&conn->cn_persession, &ses->se_conns);
1767 }
1768
nfsd4_hash_conn(struct nfsd4_conn * conn,struct nfsd4_session * ses)1769 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1770 {
1771 struct nfs4_client *clp = ses->se_client;
1772
1773 spin_lock(&clp->cl_lock);
1774 __nfsd4_hash_conn(conn, ses);
1775 spin_unlock(&clp->cl_lock);
1776 }
1777
1778 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1779 {
1780 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1781 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1782 }
1783
1784 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1785 {
1786 int ret;
1787
1788 nfsd4_hash_conn(conn, ses);
1789 ret = nfsd4_register_conn(conn);
1790 if (ret)
1791 /* oops; xprt is already down: */
1792 nfsd4_conn_lost(&conn->cn_xpt_user);
1793 /* We may have gained or lost a callback channel: */
1794 nfsd4_probe_callback_sync(ses->se_client);
1795 }
1796
1797 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1798 {
1799 u32 dir = NFS4_CDFC4_FORE;
1800
1801 if (cses->flags & SESSION4_BACK_CHAN)
1802 dir |= NFS4_CDFC4_BACK;
1803 return alloc_conn(rqstp, dir);
1804 }
1805
1806 /* must be called under client_lock */
1807 static void nfsd4_del_conns(struct nfsd4_session *s)
1808 {
1809 struct nfs4_client *clp = s->se_client;
1810 struct nfsd4_conn *c;
1811
1812 spin_lock(&clp->cl_lock);
1813 while (!list_empty(&s->se_conns)) {
1814 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1815 list_del_init(&c->cn_persession);
1816 spin_unlock(&clp->cl_lock);
1817
1818 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1819 free_conn(c);
1820
1821 spin_lock(&clp->cl_lock);
1822 }
1823 spin_unlock(&clp->cl_lock);
1824 }
1825
1826 static void __free_session(struct nfsd4_session *ses)
1827 {
1828 free_session_slots(ses);
1829 kfree(ses);
1830 }
1831
1832 static void free_session(struct nfsd4_session *ses)
1833 {
1834 nfsd4_del_conns(ses);
1835 nfsd4_put_drc_mem(&ses->se_fchannel);
1836 __free_session(ses);
1837 }
1838
1839 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1840 {
1841 int idx;
1842 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1843
1844 new->se_client = clp;
1845 gen_sessionid(new);
1846
1847 INIT_LIST_HEAD(&new->se_conns);
1848
1849 new->se_cb_seq_nr = 1;
1850 new->se_flags = cses->flags;
1851 new->se_cb_prog = cses->callback_prog;
1852 new->se_cb_sec = cses->cb_sec;
1853 atomic_set(&new->se_ref, 0);
1854 idx = hash_sessionid(&new->se_sessionid);
1855 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1856 spin_lock(&clp->cl_lock);
1857 list_add(&new->se_perclnt, &clp->cl_sessions);
1858 spin_unlock(&clp->cl_lock);
1859
1860 {
1861 struct sockaddr *sa = svc_addr(rqstp);
1862 /*
1863 * This is a little silly; with sessions there's no real
1864 * use for the callback address. Use the peer address
1865 * as a reasonable default for now, but consider fixing
1866 * the rpc client not to require an address in the
1867 * future:
1868 */
1869 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1870 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1871 }
1872 }
1873
1874 /* caller must hold client_lock */
1875 static struct nfsd4_session *
1876 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1877 {
1878 struct nfsd4_session *elem;
1879 int idx;
1880 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1881
1882 lockdep_assert_held(&nn->client_lock);
1883
1884 dump_sessionid(__func__, sessionid);
1885 idx = hash_sessionid(sessionid);
1886 /* Search in the appropriate list */
1887 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1888 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1889 NFS4_MAX_SESSIONID_LEN)) {
1890 return elem;
1891 }
1892 }
1893
1894 dprintk("%s: session not found\n", __func__);
1895 return NULL;
1896 }
1897
1898 static struct nfsd4_session *
1899 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1900 __be32 *ret)
1901 {
1902 struct nfsd4_session *session;
1903 __be32 status = nfserr_badsession;
1904
1905 session = __find_in_sessionid_hashtbl(sessionid, net);
1906 if (!session)
1907 goto out;
1908 status = nfsd4_get_session_locked(session);
1909 if (status)
1910 session = NULL;
1911 out:
1912 *ret = status;
1913 return session;
1914 }
1915
1916 /* caller must hold client_lock */
1917 static void
1918 unhash_session(struct nfsd4_session *ses)
1919 {
1920 struct nfs4_client *clp = ses->se_client;
1921 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1922
1923 lockdep_assert_held(&nn->client_lock);
1924
1925 list_del(&ses->se_hash);
1926 spin_lock(&ses->se_client->cl_lock);
1927 list_del(&ses->se_perclnt);
1928 spin_unlock(&ses->se_client->cl_lock);
1929 }
1930
1931 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1932 static int
1933 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1934 {
1935 /*
1936 * We're assuming the clid was not given out from a boot
1937 * precisely 2^32 (about 136 years) before this one. That seems
1938 * a safe assumption:
1939 */
1940 if (clid->cl_boot == (u32)nn->boot_time)
1941 return 0;
1942 trace_nfsd_clid_stale(clid);
1943 return 1;
1944 }
1945
1946 /*
1947 * XXX Should we use a slab cache?
1948 * This type of memory management is somewhat inefficient, but we use it
1949 * anyway since SETCLIENTID is not a common operation.
1950 */
1951 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1952 {
1953 struct nfs4_client *clp;
1954 int i;
1955
1956 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1957 if (clp == NULL)
1958 return NULL;
1959 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1960 if (clp->cl_name.data == NULL)
1961 goto err_no_name;
1962 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1963 sizeof(struct list_head),
1964 GFP_KERNEL);
1965 if (!clp->cl_ownerstr_hashtbl)
1966 goto err_no_hashtbl;
1967 for (i = 0; i < OWNER_HASH_SIZE; i++)
1968 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1969 INIT_LIST_HEAD(&clp->cl_sessions);
1970 idr_init(&clp->cl_stateids);
1971 atomic_set(&clp->cl_rpc_users, 0);
1972 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1973 INIT_LIST_HEAD(&clp->cl_idhash);
1974 INIT_LIST_HEAD(&clp->cl_openowners);
1975 INIT_LIST_HEAD(&clp->cl_delegations);
1976 INIT_LIST_HEAD(&clp->cl_lru);
1977 INIT_LIST_HEAD(&clp->cl_revoked);
1978 #ifdef CONFIG_NFSD_PNFS
1979 INIT_LIST_HEAD(&clp->cl_lo_states);
1980 #endif
1981 INIT_LIST_HEAD(&clp->async_copies);
1982 spin_lock_init(&clp->async_lock);
1983 spin_lock_init(&clp->cl_lock);
1984 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1985 return clp;
1986 err_no_hashtbl:
1987 kfree(clp->cl_name.data);
1988 err_no_name:
1989 kmem_cache_free(client_slab, clp);
1990 return NULL;
1991 }
1992
1993 static void __free_client(struct kref *k)
1994 {
1995 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
1996 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
1997
1998 free_svc_cred(&clp->cl_cred);
1999 kfree(clp->cl_ownerstr_hashtbl);
2000 kfree(clp->cl_name.data);
2001 kfree(clp->cl_nii_domain.data);
2002 kfree(clp->cl_nii_name.data);
2003 idr_destroy(&clp->cl_stateids);
2004 kmem_cache_free(client_slab, clp);
2005 }
2006
2007 static void drop_client(struct nfs4_client *clp)
2008 {
2009 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2010 }
2011
2012 static void
2013 free_client(struct nfs4_client *clp)
2014 {
2015 while (!list_empty(&clp->cl_sessions)) {
2016 struct nfsd4_session *ses;
2017 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2018 se_perclnt);
2019 list_del(&ses->se_perclnt);
2020 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2021 free_session(ses);
2022 }
2023 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2024 if (clp->cl_nfsd_dentry) {
2025 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2026 clp->cl_nfsd_dentry = NULL;
2027 wake_up_all(&expiry_wq);
2028 }
2029 drop_client(clp);
2030 }
2031
2032 /* must be called under the client_lock */
2033 static void
2034 unhash_client_locked(struct nfs4_client *clp)
2035 {
2036 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2037 struct nfsd4_session *ses;
2038
2039 lockdep_assert_held(&nn->client_lock);
2040
2041 /* Mark the client as expired! */
2042 clp->cl_time = 0;
2043 /* Make it invisible */
2044 if (!list_empty(&clp->cl_idhash)) {
2045 list_del_init(&clp->cl_idhash);
2046 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2047 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2048 else
2049 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2050 }
2051 list_del_init(&clp->cl_lru);
2052 spin_lock(&clp->cl_lock);
2053 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2054 list_del_init(&ses->se_hash);
2055 spin_unlock(&clp->cl_lock);
2056 }
2057
2058 static void
2059 unhash_client(struct nfs4_client *clp)
2060 {
2061 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2062
2063 spin_lock(&nn->client_lock);
2064 unhash_client_locked(clp);
2065 spin_unlock(&nn->client_lock);
2066 }
2067
2068 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2069 {
2070 if (atomic_read(&clp->cl_rpc_users))
2071 return nfserr_jukebox;
2072 unhash_client_locked(clp);
2073 return nfs_ok;
2074 }
2075
2076 static void
2077 __destroy_client(struct nfs4_client *clp)
2078 {
2079 int i;
2080 struct nfs4_openowner *oo;
2081 struct nfs4_delegation *dp;
2082 struct list_head reaplist;
2083
2084 INIT_LIST_HEAD(&reaplist);
2085 spin_lock(&state_lock);
2086 while (!list_empty(&clp->cl_delegations)) {
2087 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2088 WARN_ON(!unhash_delegation_locked(dp));
2089 list_add(&dp->dl_recall_lru, &reaplist);
2090 }
2091 spin_unlock(&state_lock);
2092 while (!list_empty(&reaplist)) {
2093 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2094 list_del_init(&dp->dl_recall_lru);
2095 destroy_unhashed_deleg(dp);
2096 }
2097 while (!list_empty(&clp->cl_revoked)) {
2098 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2099 list_del_init(&dp->dl_recall_lru);
2100 nfs4_put_stid(&dp->dl_stid);
2101 }
2102 while (!list_empty(&clp->cl_openowners)) {
2103 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2104 nfs4_get_stateowner(&oo->oo_owner);
2105 release_openowner(oo);
2106 }
2107 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2108 struct nfs4_stateowner *so, *tmp;
2109
2110 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2111 so_strhash) {
2112 /* Should be no openowners at this point */
2113 WARN_ON_ONCE(so->so_is_open_owner);
2114 remove_blocked_locks(lockowner(so));
2115 }
2116 }
2117 nfsd4_return_all_client_layouts(clp);
2118 nfsd4_shutdown_copy(clp);
2119 nfsd4_shutdown_callback(clp);
2120 if (clp->cl_cb_conn.cb_xprt)
2121 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2122 free_client(clp);
2123 wake_up_all(&expiry_wq);
2124 }
2125
2126 static void
2127 destroy_client(struct nfs4_client *clp)
2128 {
2129 unhash_client(clp);
2130 __destroy_client(clp);
2131 }
2132
2133 static void inc_reclaim_complete(struct nfs4_client *clp)
2134 {
2135 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2136
2137 if (!nn->track_reclaim_completes)
2138 return;
2139 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2140 return;
2141 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2142 nn->reclaim_str_hashtbl_size) {
2143 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2144 clp->net->ns.inum);
2145 nfsd4_end_grace(nn);
2146 }
2147 }
2148
2149 static void expire_client(struct nfs4_client *clp)
2150 {
2151 unhash_client(clp);
2152 nfsd4_client_record_remove(clp);
2153 __destroy_client(clp);
2154 }
2155
2156 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2157 {
2158 memcpy(target->cl_verifier.data, source->data,
2159 sizeof(target->cl_verifier.data));
2160 }
2161
2162 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2163 {
2164 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2165 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2166 }
2167
2168 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2169 {
2170 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2171 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2172 GFP_KERNEL);
2173 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2174 if ((source->cr_principal && !target->cr_principal) ||
2175 (source->cr_raw_principal && !target->cr_raw_principal) ||
2176 (source->cr_targ_princ && !target->cr_targ_princ))
2177 return -ENOMEM;
2178
2179 target->cr_flavor = source->cr_flavor;
2180 target->cr_uid = source->cr_uid;
2181 target->cr_gid = source->cr_gid;
2182 target->cr_group_info = source->cr_group_info;
2183 get_group_info(target->cr_group_info);
2184 target->cr_gss_mech = source->cr_gss_mech;
2185 if (source->cr_gss_mech)
2186 gss_mech_get(source->cr_gss_mech);
2187 return 0;
2188 }
2189
2190 static int
2191 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2192 {
2193 if (o1->len < o2->len)
2194 return -1;
2195 if (o1->len > o2->len)
2196 return 1;
2197 return memcmp(o1->data, o2->data, o1->len);
2198 }
2199
2200 static int
2201 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2202 {
2203 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2204 }
2205
2206 static int
2207 same_clid(clientid_t *cl1, clientid_t *cl2)
2208 {
2209 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2210 }
2211
2212 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2213 {
2214 int i;
2215
2216 if (g1->ngroups != g2->ngroups)
2217 return false;
2218 for (i=0; i<g1->ngroups; i++)
2219 if (!gid_eq(g1->gid[i], g2->gid[i]))
2220 return false;
2221 return true;
2222 }
2223
2224 /*
2225 * RFC 3530 language requires clid_inuse be returned when the
2226 * "principal" associated with a requests differs from that previously
2227 * used. We use uid, gid's, and gss principal string as our best
2228 * approximation. We also don't want to allow non-gss use of a client
2229 * established using gss: in theory cr_principal should catch that
2230 * change, but in practice cr_principal can be null even in the gss case
2231 * since gssd doesn't always pass down a principal string.
2232 */
2233 static bool is_gss_cred(struct svc_cred *cr)
2234 {
2235 /* Is cr_flavor one of the gss "pseudoflavors"?: */
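/* (i.e. numerically above RPC_AUTH_MAXFLAVOR, e.g. RPC_AUTH_GSS_KRB5) */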
2236 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2237 }
2238
2239
2240 static bool
2241 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2242 {
2243 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2244 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2245 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2246 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2247 return false;
2248 /* XXX: check that cr_targ_princ fields match ? */
2249 if (cr1->cr_principal == cr2->cr_principal)
2250 return true;
2251 if (!cr1->cr_principal || !cr2->cr_principal)
2252 return false;
2253 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2254 }
2255
2256 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2257 {
2258 struct svc_cred *cr = &rqstp->rq_cred;
2259 u32 service;
2260
2261 if (!cr->cr_gss_mech)
2262 return false;
2263 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2264 return service == RPC_GSS_SVC_INTEGRITY ||
2265 service == RPC_GSS_SVC_PRIVACY;
2266 }
2267
2268 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2269 {
2270 struct svc_cred *cr = &rqstp->rq_cred;
2271
2272 if (!cl->cl_mach_cred)
2273 return true;
2274 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2275 return false;
2276 if (!svc_rqst_integrity_protected(rqstp))
2277 return false;
2278 if (cl->cl_cred.cr_raw_principal)
2279 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2280 cr->cr_raw_principal);
2281 if (!cr->cr_principal)
2282 return false;
2283 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2284 }
2285
2286 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2287 {
2288 __be32 verf[2];
2289
2290 /*
2291 * This is opaque to client, so no need to byte-swap. Use
2292 * __force to keep sparse happy
2293 */
2294 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2295 verf[1] = (__force __be32)nn->clverifier_counter++;
2296 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2297 }
2298
2299 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2300 {
2301 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2302 clp->cl_clientid.cl_id = nn->clientid_counter++;
2303 gen_confirm(clp, nn);
2304 }
2305
2306 static struct nfs4_stid *
2307 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2308 {
2309 struct nfs4_stid *ret;
2310
2311 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2312 if (!ret || !ret->sc_type)
2313 return NULL;
2314 return ret;
2315 }
2316
2317 static struct nfs4_stid *
2318 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2319 {
2320 struct nfs4_stid *s;
2321
2322 spin_lock(&cl->cl_lock);
2323 s = find_stateid_locked(cl, t);
2324 if (s != NULL) {
2325 if (typemask & s->sc_type)
2326 refcount_inc(&s->sc_count);
2327 else
2328 s = NULL;
2329 }
2330 spin_unlock(&cl->cl_lock);
2331 return s;
2332 }
2333
2334 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2335 {
2336 struct nfsdfs_client *nc;
2337 nc = get_nfsdfs_client(inode);
2338 if (!nc)
2339 return NULL;
2340 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2341 }
2342
2343 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2344 {
2345 seq_printf(m, "\"");
2346 seq_escape_mem_ascii(m, data, len);
2347 seq_printf(m, "\"");
2348 }
2349
2350 static int client_info_show(struct seq_file *m, void *v)
2351 {
2352 struct inode *inode = m->private;
2353 struct nfs4_client *clp;
2354 u64 clid;
2355
2356 clp = get_nfsdfs_clp(inode);
2357 if (!clp)
2358 return -ENXIO;
2359 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2360 seq_printf(m, "clientid: 0x%llx\n", clid);
2361 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2362 seq_printf(m, "name: ");
2363 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2364 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2365 if (clp->cl_nii_domain.data) {
2366 seq_printf(m, "Implementation domain: ");
2367 seq_quote_mem(m, clp->cl_nii_domain.data,
2368 clp->cl_nii_domain.len);
2369 seq_printf(m, "\nImplementation name: ");
2370 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2371 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2372 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2373 }
2374 drop_client(clp);
2375
2376 return 0;
2377 }
2378
2379 static int client_info_open(struct inode *inode, struct file *file)
2380 {
2381 return single_open(file, client_info_show, inode);
2382 }
2383
2384 static const struct file_operations client_info_fops = {
2385 .open = client_info_open,
2386 .read = seq_read,
2387 .llseek = seq_lseek,
2388 .release = single_release,
2389 };
2390
2391 static void *states_start(struct seq_file *s, loff_t *pos)
2392 __acquires(&clp->cl_lock)
2393 {
2394 struct nfs4_client *clp = s->private;
2395 unsigned long id = *pos;
2396 void *ret;
2397
2398 spin_lock(&clp->cl_lock);
2399 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2400 *pos = id;
2401 return ret;
2402 }
2403
2404 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2405 {
2406 struct nfs4_client *clp = s->private;
2407 unsigned long id = *pos;
2408 void *ret;
2409
2411 id++;
2412 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2413 *pos = id;
2414 return ret;
2415 }
2416
2417 static void states_stop(struct seq_file *s, void *v)
2418 __releases(&clp->cl_lock)
2419 {
2420 struct nfs4_client *clp = s->private;
2421
2422 spin_unlock(&clp->cl_lock);
2423 }
2424
2425 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2426 {
2427 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2428 }
2429
2430 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2431 {
2432 struct inode *inode = f->nf_inode;
2433
2434 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2435 MAJOR(inode->i_sb->s_dev),
2436 MINOR(inode->i_sb->s_dev),
2437 inode->i_ino);
2438 }
2439
2440 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2441 {
2442 seq_printf(s, "owner: ");
2443 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2444 }
2445
2446 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2447 {
2448 seq_printf(s, "0x%.8x", stid->si_generation);
2449 seq_printf(s, "%12phN", &stid->si_opaque);
2450 }
2451
2452 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2453 {
2454 struct nfs4_ol_stateid *ols;
2455 struct nfs4_file *nf;
2456 struct nfsd_file *file;
2457 struct nfs4_stateowner *oo;
2458 unsigned int access, deny;
2459
2460 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2461 return 0; /* XXX: or SEQ_SKIP? */
2462 ols = openlockstateid(st);
2463 oo = ols->st_stateowner;
2464 nf = st->sc_file;
2465 file = find_any_file(nf);
2466 if (!file)
2467 return 0;
2468
2469 seq_printf(s, "- ");
2470 nfs4_show_stateid(s, &st->sc_stateid);
2471 seq_printf(s, ": { type: open, ");
2472
2473 access = bmap_to_share_mode(ols->st_access_bmap);
2474 deny = bmap_to_share_mode(ols->st_deny_bmap);
2475
2476 seq_printf(s, "access: %s%s, ",
2477 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2478 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2479 seq_printf(s, "deny: %s%s, ",
2480 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2481 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2482
2483 nfs4_show_superblock(s, file);
2484 seq_printf(s, ", ");
2485 nfs4_show_fname(s, file);
2486 seq_printf(s, ", ");
2487 nfs4_show_owner(s, oo);
2488 seq_printf(s, " }\n");
2489 nfsd_file_put(file);
2490
2491 return 0;
2492 }
2493
2494 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2495 {
2496 struct nfs4_ol_stateid *ols;
2497 struct nfs4_file *nf;
2498 struct nfsd_file *file;
2499 struct nfs4_stateowner *oo;
2500
2501 ols = openlockstateid(st);
2502 oo = ols->st_stateowner;
2503 nf = st->sc_file;
2504 file = find_any_file(nf);
2505 if (!file)
2506 return 0;
2507
2508 seq_printf(s, "- ");
2509 nfs4_show_stateid(s, &st->sc_stateid);
2510 seq_printf(s, ": { type: lock, ");
2511
2512 /*
2513 * Note: a lock stateid isn't really the same thing as a lock,
2514 * it's the locking state held by one owner on a file, and there
2515 * may be multiple (or no) lock ranges associated with it.
2516 * (The same is true of open stateids.)
2517 */
2518
2519 nfs4_show_superblock(s, file);
2520 /* XXX: open stateid? */
2521 seq_printf(s, ", ");
2522 nfs4_show_fname(s, file);
2523 seq_printf(s, ", ");
2524 nfs4_show_owner(s, oo);
2525 seq_printf(s, " }\n");
2526 nfsd_file_put(file);
2527
2528 return 0;
2529 }
2530
2531 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2532 {
2533 struct nfs4_delegation *ds;
2534 struct nfs4_file *nf;
2535 struct nfsd_file *file;
2536
2537 ds = delegstateid(st);
2538 nf = st->sc_file;
2539 file = find_deleg_file(nf);
2540 if (!file)
2541 return 0;
2542
2543 seq_printf(s, "- ");
2544 nfs4_show_stateid(s, &st->sc_stateid);
2545 seq_printf(s, ": { type: deleg, ");
2546
2547 /* Kinda dead code as long as we only support read delegs: */
2548 seq_printf(s, "access: %s, ",
2549 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2550
2551 /* XXX: lease time, whether it's being recalled. */
2552
2553 nfs4_show_superblock(s, file);
2554 seq_printf(s, ", ");
2555 nfs4_show_fname(s, file);
2556 seq_printf(s, " }\n");
2557 nfsd_file_put(file);
2558
2559 return 0;
2560 }
2561
2562 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2563 {
2564 struct nfs4_layout_stateid *ls;
2565 struct nfsd_file *file;
2566
2567 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2568 file = ls->ls_file;
2569
2570 seq_printf(s, "- ");
2571 nfs4_show_stateid(s, &st->sc_stateid);
2572 seq_printf(s, ": { type: layout, ");
2573
2574 /* XXX: What else would be useful? */
2575
2576 nfs4_show_superblock(s, file);
2577 seq_printf(s, ", ");
2578 nfs4_show_fname(s, file);
2579 seq_printf(s, " }\n");
2580
2581 return 0;
2582 }
2583
2584 static int states_show(struct seq_file *s, void *v)
2585 {
2586 struct nfs4_stid *st = v;
2587
2588 switch (st->sc_type) {
2589 case NFS4_OPEN_STID:
2590 return nfs4_show_open(s, st);
2591 case NFS4_LOCK_STID:
2592 return nfs4_show_lock(s, st);
2593 case NFS4_DELEG_STID:
2594 return nfs4_show_deleg(s, st);
2595 case NFS4_LAYOUT_STID:
2596 return nfs4_show_layout(s, st);
2597 default:
2598 return 0; /* XXX: or SEQ_SKIP? */
2599 }
2600 /* XXX: copy stateids? */
2601 }
2602
2603 static struct seq_operations states_seq_ops = {
2604 .start = states_start,
2605 .next = states_next,
2606 .stop = states_stop,
2607 .show = states_show
2608 };
2609
2610 static int client_states_open(struct inode *inode, struct file *file)
2611 {
2612 struct seq_file *s;
2613 struct nfs4_client *clp;
2614 int ret;
2615
2616 clp = get_nfsdfs_clp(inode);
2617 if (!clp)
2618 return -ENXIO;
2619
2620 ret = seq_open(file, &states_seq_ops);
2621 if (ret)
2622 return ret;
2623 s = file->private_data;
2624 s->private = clp;
2625 return 0;
2626 }
2627
2628 static int client_opens_release(struct inode *inode, struct file *file)
2629 {
2630 struct seq_file *m = file->private_data;
2631 struct nfs4_client *clp = m->private;
2632
2633 /* XXX: alternatively, we could get/drop in seq start/stop */
2634 drop_client(clp);
2635 return 0;
2636 }
2637
2638 static const struct file_operations client_states_fops = {
2639 .open = client_states_open,
2640 .read = seq_read,
2641 .llseek = seq_lseek,
2642 .release = client_opens_release,
2643 };
2644
2645 /*
2646 * Normally we refuse to destroy clients that are in use, but here the
2647 * administrator is telling us to just do it. We also want to wait
2648 * so the caller has a guarantee that the client's locks are gone by
2649 * the time the write returns:
2650 */
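/*
 * (Reached by writing "expire\n" to a client's "ctl" file in nfsdfs;
 * see client_ctl_write() and client_files[] below.)
 */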
2651 static void force_expire_client(struct nfs4_client *clp)
2652 {
2653 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2654 bool already_expired;
2655
2656 spin_lock(&nn->client_lock);
2657 clp->cl_time = 0;
2658 spin_unlock(&nn->client_lock);
2659
2660 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2661 spin_lock(&nn->client_lock);
2662 already_expired = list_empty(&clp->cl_lru);
2663 if (!already_expired)
2664 unhash_client_locked(clp);
2665 spin_unlock(&nn->client_lock);
2666
2667 if (!already_expired)
2668 expire_client(clp);
2669 else
2670 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2671 }
2672
2673 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2674 size_t size, loff_t *pos)
2675 {
2676 char *data;
2677 struct nfs4_client *clp;
2678
2679 data = simple_transaction_get(file, buf, size);
2680 if (IS_ERR(data))
2681 return PTR_ERR(data);
2682 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2683 return -EINVAL;
2684 clp = get_nfsdfs_clp(file_inode(file));
2685 if (!clp)
2686 return -ENXIO;
2687 force_expire_client(clp);
2688 drop_client(clp);
2689 return 7;
2690 }
2691
2692 static const struct file_operations client_ctl_fops = {
2693 .write = client_ctl_write,
2694 .release = simple_transaction_release,
2695 };
2696
2697 static const struct tree_descr client_files[] = {
2698 [0] = {"info", &client_info_fops, S_IRUSR},
2699 [1] = {"states", &client_states_fops, S_IRUSR},
2700 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2701 [3] = {""},
2702 };
2703
2704 static struct nfs4_client *create_client(struct xdr_netobj name,
2705 struct svc_rqst *rqstp, nfs4_verifier *verf)
2706 {
2707 struct nfs4_client *clp;
2708 struct sockaddr *sa = svc_addr(rqstp);
2709 int ret;
2710 struct net *net = SVC_NET(rqstp);
2711 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2712
2713 clp = alloc_client(name);
2714 if (clp == NULL)
2715 return NULL;
2716
2717 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2718 if (ret) {
2719 free_client(clp);
2720 return NULL;
2721 }
2722 gen_clid(clp, nn);
2723 kref_init(&clp->cl_nfsdfs.cl_ref);
2724 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2725 clp->cl_time = ktime_get_boottime_seconds();
2726 clear_bit(0, &clp->cl_cb_slot_busy);
2727 copy_verf(clp, verf);
2728 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2729 clp->cl_cb_session = NULL;
2730 clp->net = net;
2731 clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
2732 clp->cl_clientid.cl_id - nn->clientid_base,
2733 client_files);
2734 if (!clp->cl_nfsd_dentry) {
2735 free_client(clp);
2736 return NULL;
2737 }
2738 return clp;
2739 }
2740
2741 static void
2742 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2743 {
2744 struct rb_node **new = &(root->rb_node), *parent = NULL;
2745 struct nfs4_client *clp;
2746
2747 while (*new) {
2748 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2749 parent = *new;
2750
2751 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2752 new = &((*new)->rb_left);
2753 else
2754 new = &((*new)->rb_right);
2755 }
2756
2757 rb_link_node(&new_clp->cl_namenode, parent, new);
2758 rb_insert_color(&new_clp->cl_namenode, root);
2759 }
2760
2761 static struct nfs4_client *
2762 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2763 {
2764 int cmp;
2765 struct rb_node *node = root->rb_node;
2766 struct nfs4_client *clp;
2767
2768 while (node) {
2769 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2770 cmp = compare_blob(&clp->cl_name, name);
2771 if (cmp > 0)
2772 node = node->rb_left;
2773 else if (cmp < 0)
2774 node = node->rb_right;
2775 else
2776 return clp;
2777 }
2778 return NULL;
2779 }
2780
2781 static void
2782 add_to_unconfirmed(struct nfs4_client *clp)
2783 {
2784 unsigned int idhashval;
2785 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2786
2787 lockdep_assert_held(&nn->client_lock);
2788
2789 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2790 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2791 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2792 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2793 renew_client_locked(clp);
2794 }
2795
2796 static void
2797 move_to_confirmed(struct nfs4_client *clp)
2798 {
2799 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2800 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2801
2802 lockdep_assert_held(&nn->client_lock);
2803
2804 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2805 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2806 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2807 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2808 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2809 renew_client_locked(clp);
2810 }
2811
2812 static struct nfs4_client *
2813 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2814 {
2815 struct nfs4_client *clp;
2816 unsigned int idhashval = clientid_hashval(clid->cl_id);
2817
2818 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2819 if (same_clid(&clp->cl_clientid, clid)) {
2820 if ((bool)clp->cl_minorversion != sessions)
2821 return NULL;
2822 renew_client_locked(clp);
2823 return clp;
2824 }
2825 }
2826 return NULL;
2827 }
2828
2829 static struct nfs4_client *
2830 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2831 {
2832 struct list_head *tbl = nn->conf_id_hashtbl;
2833
2834 lockdep_assert_held(&nn->client_lock);
2835 return find_client_in_id_table(tbl, clid, sessions);
2836 }
2837
2838 static struct nfs4_client *
2839 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2840 {
2841 struct list_head *tbl = nn->unconf_id_hashtbl;
2842
2843 lockdep_assert_held(&nn->client_lock);
2844 return find_client_in_id_table(tbl, clid, sessions);
2845 }
2846
2847 static bool clp_used_exchangeid(struct nfs4_client *clp)
2848 {
2849 return clp->cl_exchange_flags != 0;
2850 }
2851
2852 static struct nfs4_client *
2853 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2854 {
2855 lockdep_assert_held(&nn->client_lock);
2856 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2857 }
2858
2859 static struct nfs4_client *
2860 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2861 {
2862 lockdep_assert_held(&nn->client_lock);
2863 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2864 }
2865
2866 static void
2867 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2868 {
2869 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2870 struct sockaddr *sa = svc_addr(rqstp);
2871 u32 scopeid = rpc_get_scope_id(sa);
2872 unsigned short expected_family;
2873
2874 /* Currently, we only support tcp and tcp6 for the callback channel */
2875 if (se->se_callback_netid_len == 3 &&
2876 !memcmp(se->se_callback_netid_val, "tcp", 3))
2877 expected_family = AF_INET;
2878 else if (se->se_callback_netid_len == 4 &&
2879 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2880 expected_family = AF_INET6;
2881 else
2882 goto out_err;
2883
2884 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2885 se->se_callback_addr_len,
2886 (struct sockaddr *)&conn->cb_addr,
2887 sizeof(conn->cb_addr));
2888
2889 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2890 goto out_err;
2891
2892 if (conn->cb_addr.ss_family == AF_INET6)
2893 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2894
2895 conn->cb_prog = se->se_callback_prog;
2896 conn->cb_ident = se->se_callback_ident;
2897 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2898 trace_nfsd_cb_args(clp, conn);
2899 return;
2900 out_err:
2901 conn->cb_addr.ss_family = AF_UNSPEC;
2902 conn->cb_addrlen = 0;
2903 trace_nfsd_cb_nodelegs(clp);
2904 return;
2905 }
2906
2907 /*
2908 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2909 */
2910 static void
2911 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2912 {
2913 struct xdr_buf *buf = resp->xdr.buf;
2914 struct nfsd4_slot *slot = resp->cstate.slot;
2915 unsigned int base;
2916
2917 dprintk("--> %s slot %p\n", __func__, slot);
2918
2919 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2920 slot->sl_opcnt = resp->opcnt;
2921 slot->sl_status = resp->cstate.status;
2922 free_svc_cred(&slot->sl_cred);
2923 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2924
2925 if (!nfsd4_cache_this(resp)) {
2926 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2927 return;
2928 }
2929 slot->sl_flags |= NFSD4_SLOT_CACHED;
2930
2931 base = resp->cstate.data_offset;
2932 slot->sl_datalen = buf->len - base;
2933 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2934 WARN(1, "%s: sessions DRC could not cache compound\n",
2935 __func__);
2936 return;
2937 }
2938
2939 /*
2940 * Encode the replay sequence operation from the slot values.
2941 * If cachethis is FALSE, encode the uncached rep error on the next
2942 * operation, which sets resp->p and increments resp->opcnt for
2943 * nfs4svc_encode_compoundres.
2944 *
2945 */
2946 static __be32
2947 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2948 struct nfsd4_compoundres *resp)
2949 {
2950 struct nfsd4_op *op;
2951 struct nfsd4_slot *slot = resp->cstate.slot;
2952
2953 /* Encode the replayed sequence operation */
2954 op = &args->ops[resp->opcnt - 1];
2955 nfsd4_encode_operation(resp, op);
2956
2957 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2958 return op->status;
2959 if (args->opcnt == 1) {
2960 /*
2961 * The original operation wasn't a solo sequence--we
2962 * always cache those--so this retry must not match the
2963 * original:
2964 */
2965 op->status = nfserr_seq_false_retry;
2966 } else {
2967 op = &args->ops[resp->opcnt++];
2968 op->status = nfserr_retry_uncached_rep;
2969 nfsd4_encode_operation(resp, op);
2970 }
2971 return op->status;
2972 }
2973
2974 /*
2975 * The sequence operation is not cached because we can use the slot and
2976 * session values.
2977 */
2978 static __be32
2979 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2980 struct nfsd4_sequence *seq)
2981 {
2982 struct nfsd4_slot *slot = resp->cstate.slot;
2983 struct xdr_stream *xdr = &resp->xdr;
2984 __be32 *p;
2985 __be32 status;
2986
2987 dprintk("--> %s slot %p\n", __func__, slot);
2988
2989 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2990 if (status)
2991 return status;
2992
2993 p = xdr_reserve_space(xdr, slot->sl_datalen);
2994 if (!p) {
2995 WARN_ON_ONCE(1);
2996 return nfserr_serverfault;
2997 }
2998 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2999 xdr_commit_encode(xdr);
3000
3001 resp->opcnt = slot->sl_opcnt;
3002 return slot->sl_status;
3003 }
3004
3005 /*
3006 * Set the exchange_id flags returned by the server.
3007 */
3008 static void
3009 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3010 {
3011 #ifdef CONFIG_NFSD_PNFS
3012 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3013 #else
3014 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3015 #endif
3016
3017 /* Referrals are supported, Migration is not. */
3018 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3019
3020 /* set the wire flags to return to client. */
3021 clid->flags = new->cl_exchange_flags;
3022 }
3023
3024 static bool client_has_openowners(struct nfs4_client *clp)
3025 {
3026 struct nfs4_openowner *oo;
3027
3028 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3029 if (!list_empty(&oo->oo_owner.so_stateids))
3030 return true;
3031 }
3032 return false;
3033 }
3034
3035 static bool client_has_state(struct nfs4_client *clp)
3036 {
3037 return client_has_openowners(clp)
3038 #ifdef CONFIG_NFSD_PNFS
3039 || !list_empty(&clp->cl_lo_states)
3040 #endif
3041 || !list_empty(&clp->cl_delegations)
3042 || !list_empty(&clp->cl_sessions)
3043 || !list_empty(&clp->async_copies);
3044 }
3045
3046 static __be32 copy_impl_id(struct nfs4_client *clp,
3047 struct nfsd4_exchange_id *exid)
3048 {
3049 if (!exid->nii_domain.data)
3050 return 0;
3051 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3052 if (!clp->cl_nii_domain.data)
3053 return nfserr_jukebox;
3054 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3055 if (!clp->cl_nii_name.data)
3056 return nfserr_jukebox;
3057 clp->cl_nii_time = exid->nii_time;
3058 return 0;
3059 }
3060
3061 __be32
3062 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3063 union nfsd4_op_u *u)
3064 {
3065 struct nfsd4_exchange_id *exid = &u->exchange_id;
3066 struct nfs4_client *conf, *new;
3067 struct nfs4_client *unconf = NULL;
3068 __be32 status;
3069 char addr_str[INET6_ADDRSTRLEN];
3070 nfs4_verifier verf = exid->verifier;
3071 struct sockaddr *sa = svc_addr(rqstp);
3072 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3073 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3074
3075 rpc_ntop(sa, addr_str, sizeof(addr_str));
3076 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3077 "ip_addr=%s flags %x, spa_how %d\n",
3078 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3079 addr_str, exid->flags, exid->spa_how);
3080
3081 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3082 return nfserr_inval;
3083
3084 new = create_client(exid->clname, rqstp, &verf);
3085 if (new == NULL)
3086 return nfserr_jukebox;
3087 status = copy_impl_id(new, exid);
3088 if (status)
3089 goto out_nolock;
3090
3091 switch (exid->spa_how) {
3092 case SP4_MACH_CRED:
3093 exid->spo_must_enforce[0] = 0;
3094 exid->spo_must_enforce[1] = (
3095 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3096 1 << (OP_EXCHANGE_ID - 32) |
3097 1 << (OP_CREATE_SESSION - 32) |
3098 1 << (OP_DESTROY_SESSION - 32) |
3099 1 << (OP_DESTROY_CLIENTID - 32));
3100
3101 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3102 1 << (OP_OPEN_DOWNGRADE) |
3103 1 << (OP_LOCKU) |
3104 1 << (OP_DELEGRETURN));
3105
3106 exid->spo_must_allow[1] &= (
3107 1 << (OP_TEST_STATEID - 32) |
3108 1 << (OP_FREE_STATEID - 32));
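/*
 * (Each spo_must_enforce/spo_must_allow word is a 32-bit op bitmap:
 * word[0] covers ops 0-31, word[1] covers ops 32-63; hence the
 * "- 32" on the opcode numbers above.)
 */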
3109 if (!svc_rqst_integrity_protected(rqstp)) {
3110 status = nfserr_inval;
3111 goto out_nolock;
3112 }
3113 /*
3114 * Sometimes userspace doesn't give us a principal.
3115 * Which is a bug, really. Anyway, we can't enforce
3116 * MACH_CRED in that case, better to give up now:
3117 */
3118 if (!new->cl_cred.cr_principal &&
3119 !new->cl_cred.cr_raw_principal) {
3120 status = nfserr_serverfault;
3121 goto out_nolock;
3122 }
3123 new->cl_mach_cred = true;
break;
3124 case SP4_NONE:
3125 break;
3126 default: /* checked by xdr code */
3127 WARN_ON_ONCE(1);
3128 fallthrough;
3129 case SP4_SSV:
3130 status = nfserr_encr_alg_unsupp;
3131 goto out_nolock;
3132 }
3133
3134 /* Cases below refer to rfc 5661 section 18.35.4: */
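/*
 * In brief: case 1 is a brand-new client, case 4 replaces an earlier
 * unconfirmed record, case 2 is a confirmed client repeating
 * EXCHANGE_ID, case 3 is a principal change on a stateless client,
 * and case 5 is a client reboot (verifier changed).  Cases 6-9 only
 * arise with EXCHGID4_FLAG_UPD_CONFIRMED_REC_A set; of those, only an
 * exact match (case 6) succeeds.
 */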
3135 spin_lock(&nn->client_lock);
3136 conf = find_confirmed_client_by_name(&exid->clname, nn);
3137 if (conf) {
3138 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3139 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3140
3141 if (update) {
3142 if (!clp_used_exchangeid(conf)) { /* buggy client */
3143 status = nfserr_inval;
3144 goto out;
3145 }
3146 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3147 status = nfserr_wrong_cred;
3148 goto out;
3149 }
3150 if (!creds_match) { /* case 9 */
3151 status = nfserr_perm;
3152 goto out;
3153 }
3154 if (!verfs_match) { /* case 8 */
3155 status = nfserr_not_same;
3156 goto out;
3157 }
3158 /* case 6 */
3159 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3160 goto out_copy;
3161 }
3162 if (!creds_match) { /* case 3 */
3163 if (client_has_state(conf)) {
3164 status = nfserr_clid_inuse;
3165 goto out;
3166 }
3167 goto out_new;
3168 }
3169 if (verfs_match) { /* case 2 */
3170 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3171 goto out_copy;
3172 }
3173 /* case 5, client reboot */
3174 conf = NULL;
3175 goto out_new;
3176 }
3177
3178 if (update) { /* case 7 */
3179 status = nfserr_noent;
3180 goto out;
3181 }
3182
3183 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3184 if (unconf) /* case 4, possible retry or client restart */
3185 unhash_client_locked(unconf);
3186
3187 /* case 1 (normal case) */
3188 out_new:
3189 if (conf) {
3190 status = mark_client_expired_locked(conf);
3191 if (status)
3192 goto out;
3193 }
3194 new->cl_minorversion = cstate->minorversion;
3195 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3196 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3197
3198 add_to_unconfirmed(new);
3199 swap(new, conf);
3200 out_copy:
3201 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3202 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3203
3204 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3205 nfsd4_set_ex_flags(conf, exid);
3206
3207 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3208 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3209 status = nfs_ok;
3210
3211 out:
3212 spin_unlock(&nn->client_lock);
3213 out_nolock:
3214 if (new)
3215 expire_client(new);
3216 if (unconf)
3217 expire_client(unconf);
3218 return status;
3219 }
3220
3221 static __be32
3222 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3223 {
3224 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3225 slot_seqid);
3226
3227 /* The slot is in use, and no response has been sent. */
3228 if (slot_inuse) {
3229 if (seqid == slot_seqid)
3230 return nfserr_jukebox;
3231 else
3232 return nfserr_seq_misordered;
3233 }
3234 /* Note unsigned 32-bit arithmetic handles wraparound: */
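/* (e.g. a seqid of 0 is the valid successor of a slot_seqid of 0xffffffff) */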
3235 if (likely(seqid == slot_seqid + 1))
3236 return nfs_ok;
3237 if (seqid == slot_seqid)
3238 return nfserr_replay_cache;
3239 return nfserr_seq_misordered;
3240 }
3241
3242 /*
3243 * Cache the create session result into the create session single DRC
3244 * slot cache by saving the xdr structure. sl_seqid has been set.
3245 * Do this for solo or embedded create session operations.
3246 */
3247 static void
3248 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3249 struct nfsd4_clid_slot *slot, __be32 nfserr)
3250 {
3251 slot->sl_status = nfserr;
3252 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3253 }
3254
3255 static __be32
3256 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3257 struct nfsd4_clid_slot *slot)
3258 {
3259 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3260 return slot->sl_status;
3261 }
3262
3263 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3264 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3265 1 + /* MIN tag is length with zero, only length */ \
3266 3 + /* version, opcount, opcode */ \
3267 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3268 /* seqid, slotID, slotID, cache */ \
3269 4 ) * sizeof(__be32))
3270
3271 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3272 2 + /* verifier: AUTH_NULL, length 0 */\
3273 1 + /* status */ \
3274 1 + /* MIN tag is length with zero, only length */ \
3275 3 + /* opcount, opcode, opstatus*/ \
3276 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3277 /* seqid, slotID, slotID, slotID, status */ \
3278 5 ) * sizeof(__be32))
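/*
 * With NFS4_MAX_SESSIONID_LEN == 16 (so XDR_QUADLEN(16) == 4), both
 * minima above work out to 16 XDR words, i.e. 64 bytes each.
 */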
3279
3280 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3281 {
3282 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3283
3284 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3285 return nfserr_toosmall;
3286 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3287 return nfserr_toosmall;
3288 ca->headerpadsz = 0;
3289 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3290 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3291 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3292 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3293 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3294 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3295 /*
3296 * Note that decreasing the slot size below the client's request may
3297 * make it difficult for the client to function correctly, whereas
3298 * decreasing the number of slots will (just?) affect
3299 * performance. When short on memory we therefore prefer to
3300 * decrease the number of slots instead of their size. Clients that
3301 * request larger slots than they need will get poor results:
3302 * Note that we always allow at least one slot, because our
3303 * accounting is soft and provides no guarantees either way.
3304 */
3305 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3306
3307 return nfs_ok;
3308 }
3309
3310 /*
3311 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3312 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3313 */
3314 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3315 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3316
3317 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3318 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3319
3320 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3321 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3322 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3323 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3324 sizeof(__be32))
3325
3326 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3327 {
3328 ca->headerpadsz = 0;
3329
3330 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3331 return nfserr_toosmall;
3332 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3333 return nfserr_toosmall;
3334 ca->maxresp_cached = 0;
3335 if (ca->maxops < 2)
3336 return nfserr_toosmall;
3337
3338 return nfs_ok;
3339 }
3340
3341 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3342 {
3343 switch (cbs->flavor) {
3344 case RPC_AUTH_NULL:
3345 case RPC_AUTH_UNIX:
3346 return nfs_ok;
3347 default:
3348 /*
3349 * GSS case: the spec doesn't allow us to return this
3350 * error. But it also doesn't allow us not to support
3351 * GSS.
3352 * I'd rather this fail hard than return some error the
3353 * client might think it can already handle:
3354 */
3355 return nfserr_encr_alg_unsupp;
3356 }
3357 }
3358
3359 __be32
3360 nfsd4_create_session(struct svc_rqst *rqstp,
3361 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3362 {
3363 struct nfsd4_create_session *cr_ses = &u->create_session;
3364 struct sockaddr *sa = svc_addr(rqstp);
3365 struct nfs4_client *conf, *unconf;
3366 struct nfs4_client *old = NULL;
3367 struct nfsd4_session *new;
3368 struct nfsd4_conn *conn;
3369 struct nfsd4_clid_slot *cs_slot = NULL;
3370 __be32 status = 0;
3371 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3372
3373 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3374 return nfserr_inval;
3375 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3376 if (status)
3377 return status;
3378 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3379 if (status)
3380 return status;
3381 status = check_backchannel_attrs(&cr_ses->back_channel);
3382 if (status)
3383 goto out_release_drc_mem;
3384 status = nfserr_jukebox;
3385 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3386 if (!new)
3387 goto out_release_drc_mem;
3388 conn = alloc_conn_from_crses(rqstp, cr_ses);
3389 if (!conn)
3390 goto out_free_session;
3391
3392 spin_lock(&nn->client_lock);
3393 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3394 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3395 WARN_ON_ONCE(conf && unconf);
3396
3397 if (conf) {
3398 status = nfserr_wrong_cred;
3399 if (!nfsd4_mach_creds_match(conf, rqstp))
3400 goto out_free_conn;
3401 cs_slot = &conf->cl_cs_slot;
3402 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3403 if (status) {
3404 if (status == nfserr_replay_cache)
3405 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3406 goto out_free_conn;
3407 }
3408 } else if (unconf) {
3409 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3410 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3411 status = nfserr_clid_inuse;
3412 goto out_free_conn;
3413 }
3414 status = nfserr_wrong_cred;
3415 if (!nfsd4_mach_creds_match(unconf, rqstp))
3416 goto out_free_conn;
3417 cs_slot = &unconf->cl_cs_slot;
3418 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3419 if (status) {
3420 /* an unconfirmed replay returns misordered */
3421 status = nfserr_seq_misordered;
3422 goto out_free_conn;
3423 }
3424 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3425 if (old) {
3426 status = mark_client_expired_locked(old);
3427 if (status) {
3428 old = NULL;
3429 goto out_free_conn;
3430 }
3431 }
3432 move_to_confirmed(unconf);
3433 conf = unconf;
3434 } else {
3435 status = nfserr_stale_clientid;
3436 goto out_free_conn;
3437 }
3438 status = nfs_ok;
3439 /* Persistent sessions are not supported */
3440 cr_ses->flags &= ~SESSION4_PERSIST;
3441 /* Upshifting from TCP to RDMA is not supported */
3442 cr_ses->flags &= ~SESSION4_RDMA;
3443
3444 init_session(rqstp, new, conf, cr_ses);
3445 nfsd4_get_session_locked(new);
3446
3447 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3448 NFS4_MAX_SESSIONID_LEN);
3449 cs_slot->sl_seqid++;
3450 cr_ses->seqid = cs_slot->sl_seqid;
3451
3452 /* cache solo and embedded create sessions under the client_lock */
3453 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3454 spin_unlock(&nn->client_lock);
3455 /* init connection and backchannel */
3456 nfsd4_init_conn(rqstp, conn, new);
3457 nfsd4_put_session(new);
3458 if (old)
3459 expire_client(old);
3460 return status;
3461 out_free_conn:
3462 spin_unlock(&nn->client_lock);
3463 free_conn(conn);
3464 if (old)
3465 expire_client(old);
3466 out_free_session:
3467 __free_session(new);
3468 out_release_drc_mem:
3469 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3470 return status;
3471 }
3472
3473 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3474 {
3475 switch (*dir) {
3476 case NFS4_CDFC4_FORE:
3477 case NFS4_CDFC4_BACK:
3478 return nfs_ok;
3479 case NFS4_CDFC4_FORE_OR_BOTH:
3480 case NFS4_CDFC4_BACK_OR_BOTH:
3481 *dir = NFS4_CDFC4_BOTH;
3482 return nfs_ok;
3483 }
3484 return nfserr_inval;
3485 }
3486
3487 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3488 struct nfsd4_compound_state *cstate,
3489 union nfsd4_op_u *u)
3490 {
3491 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3492 struct nfsd4_session *session = cstate->session;
3493 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3494 __be32 status;
3495
3496 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3497 if (status)
3498 return status;
3499 spin_lock(&nn->client_lock);
3500 session->se_cb_prog = bc->bc_cb_program;
3501 session->se_cb_sec = bc->bc_cb_sec;
3502 spin_unlock(&nn->client_lock);
3503
3504 nfsd4_probe_callback(session->se_client);
3505
3506 return nfs_ok;
3507 }
3508
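/*
 * Find the connection in this session's connection list that is bound
 * to the given transport, if any. Callers must hold the client's
 * cl_lock.
 */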
3509 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3510 {
3511 struct nfsd4_conn *c;
3512
3513 list_for_each_entry(c, &s->se_conns, cn_persession) {
3514 if (c->cn_xprt == xpt) {
3515 return c;
3516 }
3517 }
3518 return NULL;
3519 }
3520
3521 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3522 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3523 {
3524 struct nfs4_client *clp = session->se_client;
3525 struct svc_xprt *xpt = rqst->rq_xprt;
3526 struct nfsd4_conn *c;
3527 __be32 status;
3528
3529 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
3530 spin_lock(&clp->cl_lock);
3531 c = __nfsd4_find_conn(xpt, session);
3532 if (!c)
3533 status = nfserr_noent;
3534 else if (req == c->cn_flags)
3535 status = nfs_ok;
3536 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3537 c->cn_flags != NFS4_CDFC4_BACK)
3538 status = nfs_ok;
3539 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3540 c->cn_flags != NFS4_CDFC4_FORE)
3541 status = nfs_ok;
3542 else
3543 status = nfserr_inval;
3544 spin_unlock(&clp->cl_lock);
3545 if (status == nfs_ok && conn)
3546 *conn = c;
3547 return status;
3548 }
3549
3550 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3551 struct nfsd4_compound_state *cstate,
3552 union nfsd4_op_u *u)
3553 {
3554 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3555 __be32 status;
3556 struct nfsd4_conn *conn;
3557 struct nfsd4_session *session;
3558 struct net *net = SVC_NET(rqstp);
3559 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3560
3561 if (!nfsd4_last_compound_op(rqstp))
3562 return nfserr_not_only_op;
3563 spin_lock(&nn->client_lock);
3564 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3565 spin_unlock(&nn->client_lock);
3566 if (!session)
3567 goto out_no_session;
3568 status = nfserr_wrong_cred;
3569 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3570 goto out;
3571 status = nfsd4_match_existing_connection(rqstp, session,
3572 bcts->dir, &conn);
3573 if (status == nfs_ok) {
3574 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3575 bcts->dir == NFS4_CDFC4_BACK)
3576 conn->cn_flags |= NFS4_CDFC4_BACK;
3577 nfsd4_probe_callback(session->se_client);
3578 goto out;
3579 }
3580 if (status == nfserr_inval)
3581 goto out;
3582 status = nfsd4_map_bcts_dir(&bcts->dir);
3583 if (status)
3584 goto out;
3585 conn = alloc_conn(rqstp, bcts->dir);
3586 status = nfserr_jukebox;
3587 if (!conn)
3588 goto out;
3589 nfsd4_init_conn(rqstp, conn, session);
3590 status = nfs_ok;
3591 out:
3592 nfsd4_put_session(session);
3593 out_no_session:
3594 return status;
3595 }
3596
3597 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3598 {
3599 if (!cstate->session)
3600 return false;
3601 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3602 }
3603
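/*
 * DESTROY_SESSION. A compound may destroy the very session it arrived
 * on, but only as its final op; in that case the compound's own
 * session reference is accounted for when marking the session dead.
 */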
3604 __be32
3605 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3606 union nfsd4_op_u *u)
3607 {
3608 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3609 struct nfsd4_session *ses;
3610 __be32 status;
3611 int ref_held_by_me = 0;
3612 struct net *net = SVC_NET(r);
3613 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3614
3615 status = nfserr_not_only_op;
3616 if (nfsd4_compound_in_session(cstate, sessionid)) {
3617 if (!nfsd4_last_compound_op(r))
3618 goto out;
3619 ref_held_by_me++;
3620 }
3621 dump_sessionid(__func__, sessionid);
3622 spin_lock(&nn->client_lock);
3623 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3624 if (!ses)
3625 goto out_client_lock;
3626 status = nfserr_wrong_cred;
3627 if (!nfsd4_mach_creds_match(ses->se_client, r))
3628 goto out_put_session;
3629 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3630 if (status)
3631 goto out_put_session;
3632 unhash_session(ses);
3633 spin_unlock(&nn->client_lock);
3634
3635 nfsd4_probe_callback_sync(ses->se_client);
3636
3637 spin_lock(&nn->client_lock);
3638 status = nfs_ok;
3639 out_put_session:
3640 nfsd4_put_session_locked(ses);
3641 out_client_lock:
3642 spin_unlock(&nn->client_lock);
3643 out:
3644 return status;
3645 }
3646
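/*
 * Ensure the connection a SEQUENCE arrived on is bound to its session.
 * If it already is, the preallocated conn is freed; otherwise it is
 * hashed into the session, unless the client demands machine-credential
 * checking, in which case an unknown connection is an error.
 */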
3647 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3648 {
3649 struct nfs4_client *clp = ses->se_client;
3650 struct nfsd4_conn *c;
3651 __be32 status = nfs_ok;
3652 int ret;
3653
3654 spin_lock(&clp->cl_lock);
3655 c = __nfsd4_find_conn(new->cn_xprt, ses);
3656 if (c)
3657 goto out_free;
3658 status = nfserr_conn_not_bound_to_session;
3659 if (clp->cl_mach_cred)
3660 goto out_free;
3661 __nfsd4_hash_conn(new, ses);
3662 spin_unlock(&clp->cl_lock);
3663 ret = nfsd4_register_conn(new);
3664 if (ret)
3665 /* oops; xprt is already down: */
3666 nfsd4_conn_lost(&new->cn_xpt_user);
3667 return nfs_ok;
3668 out_free:
3669 spin_unlock(&clp->cl_lock);
3670 free_conn(new);
3671 return status;
3672 }
3673
3674 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3675 {
3676 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3677
3678 return args->opcnt > session->se_fchannel.maxops;
3679 }
3680
3681 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3682 struct nfsd4_session *session)
3683 {
3684 struct xdr_buf *xb = &rqstp->rq_arg;
3685
3686 return xb->len > session->se_fchannel.maxreq_sz;
3687 }
3688
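/*
 * Sanity-check a presumed replay against the reply cached in the
 * slot: the cachethis flag, the op counts, and the principal must all
 * be consistent with the original call.
 */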
3689 static bool replay_matches_cache(struct svc_rqst *rqstp,
3690 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3691 {
3692 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3693
3694 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3695 (bool)seq->cachethis)
3696 return false;
3697 /*
3698 * If there's an error then the reply can have fewer ops than
3699 * the call.
3700 */
3701 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3702 return false;
3703 /*
3704 * But if we cached a reply with *more* ops than the call you're
3705 * sending us now, then this new call is clearly not really a
3706 * replay of the old one:
3707 */
3708 if (slot->sl_opcnt > argp->opcnt)
3709 return false;
3710 	/* This is the only check explicitly required by the spec: */
3711 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3712 return false;
3713 /*
3714 * There may be more comparisons we could actually do, but the
3715 * spec doesn't require us to catch every case where the calls
3716 * don't match (that would require caching the call as well as
3717 * the reply), so we don't bother.
3718 */
3719 return true;
3720 }
3721
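/*
 * SEQUENCE. Validates the slot and seqid, replays the cached reply
 * when the retransmission checks pass, binds the connection to the
 * session, and caps the reply buffer at the channel's negotiated
 * sizes.
 */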
3722 __be32
3723 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3724 union nfsd4_op_u *u)
3725 {
3726 struct nfsd4_sequence *seq = &u->sequence;
3727 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3728 struct xdr_stream *xdr = &resp->xdr;
3729 struct nfsd4_session *session;
3730 struct nfs4_client *clp;
3731 struct nfsd4_slot *slot;
3732 struct nfsd4_conn *conn;
3733 __be32 status;
3734 int buflen;
3735 struct net *net = SVC_NET(rqstp);
3736 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3737
3738 if (resp->opcnt != 1)
3739 return nfserr_sequence_pos;
3740
3741 /*
3742 * Will be either used or freed by nfsd4_sequence_check_conn
3743 * below.
3744 */
3745 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3746 if (!conn)
3747 return nfserr_jukebox;
3748
3749 spin_lock(&nn->client_lock);
3750 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3751 if (!session)
3752 goto out_no_session;
3753 clp = session->se_client;
3754
3755 status = nfserr_too_many_ops;
3756 if (nfsd4_session_too_many_ops(rqstp, session))
3757 goto out_put_session;
3758
3759 status = nfserr_req_too_big;
3760 if (nfsd4_request_too_big(rqstp, session))
3761 goto out_put_session;
3762
3763 status = nfserr_badslot;
3764 if (seq->slotid >= session->se_fchannel.maxreqs)
3765 goto out_put_session;
3766
3767 slot = session->se_slots[seq->slotid];
3768 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3769
3770 	/* We do not negotiate the number of slots yet, so set the
3771 	 * maxslots to the session maxreqs, which is used to encode
3772 	 * sr_highest_slotid and the sr_target_slotid to maxslots */
3773 seq->maxslots = session->se_fchannel.maxreqs;
3774
3775 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3776 slot->sl_flags & NFSD4_SLOT_INUSE);
3777 if (status == nfserr_replay_cache) {
3778 status = nfserr_seq_misordered;
3779 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3780 goto out_put_session;
3781 status = nfserr_seq_false_retry;
3782 if (!replay_matches_cache(rqstp, seq, slot))
3783 goto out_put_session;
3784 cstate->slot = slot;
3785 cstate->session = session;
3786 cstate->clp = clp;
3787 /* Return the cached reply status and set cstate->status
3788 * for nfsd4_proc_compound processing */
3789 status = nfsd4_replay_cache_entry(resp, seq);
3790 cstate->status = nfserr_replay_cache;
3791 goto out;
3792 }
3793 if (status)
3794 goto out_put_session;
3795
3796 status = nfsd4_sequence_check_conn(conn, session);
3797 conn = NULL;
3798 if (status)
3799 goto out_put_session;
3800
3801 buflen = (seq->cachethis) ?
3802 session->se_fchannel.maxresp_cached :
3803 session->se_fchannel.maxresp_sz;
3804 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3805 nfserr_rep_too_big;
3806 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3807 goto out_put_session;
3808 svc_reserve(rqstp, buflen);
3809
3810 status = nfs_ok;
3811 /* Success! bump slot seqid */
3812 slot->sl_seqid = seq->seqid;
3813 slot->sl_flags |= NFSD4_SLOT_INUSE;
3814 if (seq->cachethis)
3815 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3816 else
3817 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3818
3819 cstate->slot = slot;
3820 cstate->session = session;
3821 cstate->clp = clp;
3822
3823 out:
3824 switch (clp->cl_cb_state) {
3825 case NFSD4_CB_DOWN:
3826 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3827 break;
3828 case NFSD4_CB_FAULT:
3829 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3830 break;
3831 default:
3832 seq->status_flags = 0;
3833 }
3834 if (!list_empty(&clp->cl_revoked))
3835 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3836 out_no_session:
3837 if (conn)
3838 free_conn(conn);
3839 spin_unlock(&nn->client_lock);
3840 return status;
3841 out_put_session:
3842 nfsd4_put_session_locked(session);
3843 goto out_no_session;
3844 }
3845
3846 void
3847 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3848 {
3849 struct nfsd4_compound_state *cs = &resp->cstate;
3850
3851 if (nfsd4_has_session(cs)) {
3852 if (cs->status != nfserr_replay_cache) {
3853 nfsd4_store_cache_entry(resp);
3854 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3855 }
3856 /* Drop session reference that was taken in nfsd4_sequence() */
3857 nfsd4_put_session(cs->session);
3858 } else if (cs->clp)
3859 put_client_renew(cs->clp);
3860 }
3861
3862 __be32
3863 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3864 struct nfsd4_compound_state *cstate,
3865 union nfsd4_op_u *u)
3866 {
3867 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3868 struct nfs4_client *conf, *unconf;
3869 struct nfs4_client *clp = NULL;
3870 __be32 status = 0;
3871 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3872
3873 spin_lock(&nn->client_lock);
3874 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3875 conf = find_confirmed_client(&dc->clientid, true, nn);
3876 WARN_ON_ONCE(conf && unconf);
3877
3878 if (conf) {
3879 if (client_has_state(conf)) {
3880 status = nfserr_clientid_busy;
3881 goto out;
3882 }
3883 status = mark_client_expired_locked(conf);
3884 if (status)
3885 goto out;
3886 clp = conf;
3887 } else if (unconf)
3888 clp = unconf;
3889 else {
3890 status = nfserr_stale_clientid;
3891 goto out;
3892 }
3893 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3894 clp = NULL;
3895 status = nfserr_wrong_cred;
3896 goto out;
3897 }
3898 unhash_client_locked(clp);
3899 out:
3900 spin_unlock(&nn->client_lock);
3901 if (clp)
3902 expire_client(clp);
3903 return status;
3904 }
3905
3906 __be32
3907 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3908 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3909 {
3910 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3911 __be32 status = 0;
3912
3913 if (rc->rca_one_fs) {
3914 if (!cstate->current_fh.fh_dentry)
3915 return nfserr_nofilehandle;
3916 /*
3917 * We don't take advantage of the rca_one_fs case.
3918 * That's OK, it's optional, we can safely ignore it.
3919 */
3920 return nfs_ok;
3921 }
3922
3923 status = nfserr_complete_already;
3924 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3925 &cstate->session->se_client->cl_flags))
3926 goto out;
3927
3928 status = nfserr_stale_clientid;
3929 if (is_client_expired(cstate->session->se_client))
3930 /*
3931 * The following error isn't really legal.
3932 * But we only get here if the client just explicitly
3933 		 * destroyed itself. Surely it no longer cares what
3934 * error it gets back on an operation for the dead
3935 * client.
3936 */
3937 goto out;
3938
3939 status = nfs_ok;
3940 nfsd4_client_record_create(cstate->session->se_client);
3941 inc_reclaim_complete(cstate->session->se_client);
3942 out:
3943 return status;
3944 }
3945
3946 __be32
3947 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3948 union nfsd4_op_u *u)
3949 {
3950 struct nfsd4_setclientid *setclid = &u->setclientid;
3951 struct xdr_netobj clname = setclid->se_name;
3952 nfs4_verifier clverifier = setclid->se_verf;
3953 struct nfs4_client *conf, *new;
3954 struct nfs4_client *unconf = NULL;
3955 __be32 status;
3956 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3957
3958 new = create_client(clname, rqstp, &clverifier);
3959 if (new == NULL)
3960 return nfserr_jukebox;
3961 /* Cases below refer to rfc 3530 section 14.2.33: */
3962 spin_lock(&nn->client_lock);
3963 conf = find_confirmed_client_by_name(&clname, nn);
3964 if (conf && client_has_state(conf)) {
3965 /* case 0: */
3966 status = nfserr_clid_inuse;
3967 if (clp_used_exchangeid(conf))
3968 goto out;
3969 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3970 trace_nfsd_clid_inuse_err(conf);
3971 goto out;
3972 }
3973 }
3974 unconf = find_unconfirmed_client_by_name(&clname, nn);
3975 if (unconf)
3976 unhash_client_locked(unconf);
3977 /* We need to handle only case 1: probable callback update */
3978 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3979 copy_clid(new, conf);
3980 gen_confirm(new, nn);
3981 }
3982 new->cl_minorversion = 0;
3983 gen_callback(new, setclid, rqstp);
3984 add_to_unconfirmed(new);
3985 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3986 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3987 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3988 new = NULL;
3989 status = nfs_ok;
3990 out:
3991 spin_unlock(&nn->client_lock);
3992 if (new)
3993 free_client(new);
3994 if (unconf)
3995 expire_client(unconf);
3996 return status;
3997 }
3998
3999
4000 __be32
4001 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4002 struct nfsd4_compound_state *cstate,
4003 union nfsd4_op_u *u)
4004 {
4005 struct nfsd4_setclientid_confirm *setclientid_confirm =
4006 &u->setclientid_confirm;
4007 struct nfs4_client *conf, *unconf;
4008 struct nfs4_client *old = NULL;
4009 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4010 clientid_t * clid = &setclientid_confirm->sc_clientid;
4011 __be32 status;
4012 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4013
4014 if (STALE_CLIENTID(clid, nn))
4015 return nfserr_stale_clientid;
4016
4017 spin_lock(&nn->client_lock);
4018 conf = find_confirmed_client(clid, false, nn);
4019 unconf = find_unconfirmed_client(clid, false, nn);
4020 /*
4021 	 * We try hard to give out unique clientids, so if we get an
4022 * attempt to confirm the same clientid with a different cred,
4023 * the client may be buggy; this should never happen.
4024 *
4025 * Nevertheless, RFC 7530 recommends INUSE for this case:
4026 */
4027 status = nfserr_clid_inuse;
4028 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
4029 goto out;
4030 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
4031 goto out;
4032 /* cases below refer to rfc 3530 section 14.2.34: */
4033 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4034 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4035 /* case 2: probable retransmit */
4036 status = nfs_ok;
4037 } else /* case 4: client hasn't noticed we rebooted yet? */
4038 status = nfserr_stale_clientid;
4039 goto out;
4040 }
4041 status = nfs_ok;
4042 if (conf) { /* case 1: callback update */
4043 old = unconf;
4044 unhash_client_locked(old);
4045 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4046 } else { /* case 3: normal case; new or rebooted client */
4047 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4048 if (old) {
4049 status = nfserr_clid_inuse;
4050 if (client_has_state(old)
4051 && !same_creds(&unconf->cl_cred,
4052 &old->cl_cred)) {
4053 old = NULL;
4054 goto out;
4055 }
4056 status = mark_client_expired_locked(old);
4057 if (status) {
4058 old = NULL;
4059 goto out;
4060 }
4061 }
4062 move_to_confirmed(unconf);
4063 conf = unconf;
4064 }
4065 get_client_locked(conf);
4066 spin_unlock(&nn->client_lock);
4067 nfsd4_probe_callback(conf);
4068 spin_lock(&nn->client_lock);
4069 put_client_renew_locked(conf);
4070 out:
4071 spin_unlock(&nn->client_lock);
4072 if (old)
4073 expire_client(old);
4074 return status;
4075 }
4076
4077 static struct nfs4_file *nfsd4_alloc_file(void)
4078 {
4079 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4080 }
4081
4082 /* OPEN Share state helper functions */
4083 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
4084 struct nfs4_file *fp)
4085 {
4086 lockdep_assert_held(&state_lock);
4087
4088 refcount_set(&fp->fi_ref, 1);
4089 spin_lock_init(&fp->fi_lock);
4090 INIT_LIST_HEAD(&fp->fi_stateids);
4091 INIT_LIST_HEAD(&fp->fi_delegations);
4092 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4093 fh_copy_shallow(&fp->fi_fhandle, fh);
4094 fp->fi_deleg_file = NULL;
4095 fp->fi_had_conflict = false;
4096 fp->fi_share_deny = 0;
4097 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4098 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4099 #ifdef CONFIG_NFSD_PNFS
4100 INIT_LIST_HEAD(&fp->fi_lo_states);
4101 atomic_set(&fp->fi_lo_recalls, 0);
4102 #endif
4103 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4104 }
4105
4106 void
4107 nfsd4_free_slabs(void)
4108 {
4109 kmem_cache_destroy(client_slab);
4110 kmem_cache_destroy(openowner_slab);
4111 kmem_cache_destroy(lockowner_slab);
4112 kmem_cache_destroy(file_slab);
4113 kmem_cache_destroy(stateid_slab);
4114 kmem_cache_destroy(deleg_slab);
4115 kmem_cache_destroy(odstate_slab);
4116 }
4117
4118 int
4119 nfsd4_init_slabs(void)
4120 {
4121 client_slab = kmem_cache_create("nfsd4_clients",
4122 sizeof(struct nfs4_client), 0, 0, NULL);
4123 if (client_slab == NULL)
4124 goto out;
4125 openowner_slab = kmem_cache_create("nfsd4_openowners",
4126 sizeof(struct nfs4_openowner), 0, 0, NULL);
4127 if (openowner_slab == NULL)
4128 goto out_free_client_slab;
4129 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4130 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4131 if (lockowner_slab == NULL)
4132 goto out_free_openowner_slab;
4133 file_slab = kmem_cache_create("nfsd4_files",
4134 sizeof(struct nfs4_file), 0, 0, NULL);
4135 if (file_slab == NULL)
4136 goto out_free_lockowner_slab;
4137 stateid_slab = kmem_cache_create("nfsd4_stateids",
4138 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4139 if (stateid_slab == NULL)
4140 goto out_free_file_slab;
4141 deleg_slab = kmem_cache_create("nfsd4_delegations",
4142 sizeof(struct nfs4_delegation), 0, 0, NULL);
4143 if (deleg_slab == NULL)
4144 goto out_free_stateid_slab;
4145 odstate_slab = kmem_cache_create("nfsd4_odstate",
4146 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4147 if (odstate_slab == NULL)
4148 goto out_free_deleg_slab;
4149 return 0;
4150
4151 out_free_deleg_slab:
4152 kmem_cache_destroy(deleg_slab);
4153 out_free_stateid_slab:
4154 kmem_cache_destroy(stateid_slab);
4155 out_free_file_slab:
4156 kmem_cache_destroy(file_slab);
4157 out_free_lockowner_slab:
4158 kmem_cache_destroy(lockowner_slab);
4159 out_free_openowner_slab:
4160 kmem_cache_destroy(openowner_slab);
4161 out_free_client_slab:
4162 kmem_cache_destroy(client_slab);
4163 out:
4164 return -ENOMEM;
4165 }
4166
4167 static void init_nfs4_replay(struct nfs4_replay *rp)
4168 {
4169 rp->rp_status = nfserr_serverfault;
4170 rp->rp_buflen = 0;
4171 rp->rp_buf = rp->rp_ibuf;
4172 mutex_init(&rp->rp_mutex);
4173 }
4174
4175 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4176 struct nfs4_stateowner *so)
4177 {
4178 if (!nfsd4_has_session(cstate)) {
4179 mutex_lock(&so->so_replay.rp_mutex);
4180 cstate->replay_owner = nfs4_get_stateowner(so);
4181 }
4182 }
4183
4184 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4185 {
4186 struct nfs4_stateowner *so = cstate->replay_owner;
4187
4188 if (so != NULL) {
4189 cstate->replay_owner = NULL;
4190 mutex_unlock(&so->so_replay.rp_mutex);
4191 nfs4_put_stateowner(so);
4192 }
4193 }
4194
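/*
 * Common allocator for open- and lockowners: carve a stateowner out of
 * the given slab, duplicate the owner name, and initialize the fields
 * shared by both owner types. Returns NULL on allocation failure.
 */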
4195 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4196 {
4197 struct nfs4_stateowner *sop;
4198
4199 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4200 if (!sop)
4201 return NULL;
4202
4203 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4204 if (!sop->so_owner.data) {
4205 kmem_cache_free(slab, sop);
4206 return NULL;
4207 }
4208
4209 INIT_LIST_HEAD(&sop->so_stateids);
4210 sop->so_client = clp;
4211 init_nfs4_replay(&sop->so_replay);
4212 atomic_set(&sop->so_count, 1);
4213 return sop;
4214 }
4215
4216 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4217 {
4218 lockdep_assert_held(&clp->cl_lock);
4219
4220 list_add(&oo->oo_owner.so_strhash,
4221 &clp->cl_ownerstr_hashtbl[strhashval]);
4222 list_add(&oo->oo_perclient, &clp->cl_openowners);
4223 }
4224
4225 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4226 {
4227 unhash_openowner_locked(openowner(so));
4228 }
4229
4230 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4231 {
4232 struct nfs4_openowner *oo = openowner(so);
4233
4234 kmem_cache_free(openowner_slab, oo);
4235 }
4236
4237 static const struct nfs4_stateowner_operations openowner_ops = {
4238 .so_unhash = nfs4_unhash_openowner,
4239 .so_free = nfs4_free_openowner,
4240 };
4241
4242 static struct nfs4_ol_stateid *
4243 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4244 {
4245 struct nfs4_ol_stateid *local, *ret = NULL;
4246 struct nfs4_openowner *oo = open->op_openowner;
4247
4248 lockdep_assert_held(&fp->fi_lock);
4249
4250 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4251 /* ignore lock owners */
4252 if (local->st_stateowner->so_is_open_owner == 0)
4253 continue;
4254 if (local->st_stateowner != &oo->oo_owner)
4255 continue;
4256 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4257 ret = local;
4258 refcount_inc(&ret->st_stid.sc_count);
4259 break;
4260 }
4261 }
4262 return ret;
4263 }
4264
4265 static __be32
4266 nfsd4_verify_open_stid(struct nfs4_stid *s)
4267 {
4268 __be32 ret = nfs_ok;
4269
4270 switch (s->sc_type) {
4271 default:
4272 break;
4273 case 0:
4274 case NFS4_CLOSED_STID:
4275 case NFS4_CLOSED_DELEG_STID:
4276 ret = nfserr_bad_stateid;
4277 break;
4278 case NFS4_REVOKED_DELEG_STID:
4279 ret = nfserr_deleg_revoked;
4280 }
4281 return ret;
4282 }
4283
4284 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4285 static __be32
4286 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4287 {
4288 __be32 ret;
4289
4290 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4291 ret = nfsd4_verify_open_stid(&stp->st_stid);
4292 if (ret != nfs_ok)
4293 mutex_unlock(&stp->st_mutex);
4294 return ret;
4295 }
4296
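/*
 * Find an existing open stateid for this owner and file and take its
 * st_mutex, retrying if the stateid is closed out from under us.
 */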
4297 static struct nfs4_ol_stateid *
4298 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4299 {
4300 struct nfs4_ol_stateid *stp;
4301 for (;;) {
4302 spin_lock(&fp->fi_lock);
4303 stp = nfsd4_find_existing_open(fp, open);
4304 spin_unlock(&fp->fi_lock);
4305 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4306 break;
4307 nfs4_put_stid(&stp->st_stid);
4308 }
4309 return stp;
4310 }
4311
4312 static struct nfs4_openowner *
4313 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4314 struct nfsd4_compound_state *cstate)
4315 {
4316 struct nfs4_client *clp = cstate->clp;
4317 struct nfs4_openowner *oo, *ret;
4318
4319 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4320 if (!oo)
4321 return NULL;
4322 oo->oo_owner.so_ops = &openowner_ops;
4323 oo->oo_owner.so_is_open_owner = 1;
4324 oo->oo_owner.so_seqid = open->op_seqid;
4325 oo->oo_flags = 0;
4326 if (nfsd4_has_session(cstate))
4327 oo->oo_flags |= NFS4_OO_CONFIRMED;
4328 oo->oo_time = 0;
4329 oo->oo_last_closed_stid = NULL;
4330 INIT_LIST_HEAD(&oo->oo_close_lru);
4331 spin_lock(&clp->cl_lock);
4332 ret = find_openstateowner_str_locked(strhashval, open, clp);
4333 if (ret == NULL) {
4334 hash_openowner(oo, clp, strhashval);
4335 ret = oo;
4336 } else
4337 nfs4_free_stateowner(&oo->oo_owner);
4338
4339 spin_unlock(&clp->cl_lock);
4340 return ret;
4341 }
4342
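/*
 * Hash the preallocated open stateid (open->op_stp) for this file and
 * owner, unless a racing thread already installed one, in which case
 * the existing stateid is locked and returned instead.
 */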
4343 static struct nfs4_ol_stateid *
4344 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4345 {
4346
4347 struct nfs4_openowner *oo = open->op_openowner;
4348 struct nfs4_ol_stateid *retstp = NULL;
4349 struct nfs4_ol_stateid *stp;
4350
4351 stp = open->op_stp;
4352 	/* Initialize and take the mutex outside the spinlocks: mutex operations can sleep and must not be done under a spinlock */
4353 mutex_init(&stp->st_mutex);
4354 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4355
4356 retry:
4357 spin_lock(&oo->oo_owner.so_client->cl_lock);
4358 spin_lock(&fp->fi_lock);
4359
4360 retstp = nfsd4_find_existing_open(fp, open);
4361 if (retstp)
4362 goto out_unlock;
4363
4364 open->op_stp = NULL;
4365 refcount_inc(&stp->st_stid.sc_count);
4366 stp->st_stid.sc_type = NFS4_OPEN_STID;
4367 INIT_LIST_HEAD(&stp->st_locks);
4368 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4369 get_nfs4_file(fp);
4370 stp->st_stid.sc_file = fp;
4371 stp->st_access_bmap = 0;
4372 stp->st_deny_bmap = 0;
4373 stp->st_openstp = NULL;
4374 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4375 list_add(&stp->st_perfile, &fp->fi_stateids);
4376
4377 out_unlock:
4378 spin_unlock(&fp->fi_lock);
4379 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4380 if (retstp) {
4381 /* Handle races with CLOSE */
4382 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4383 nfs4_put_stid(&retstp->st_stid);
4384 goto retry;
4385 }
4386 /* To keep mutex tracking happy */
4387 mutex_unlock(&stp->st_mutex);
4388 stp = retstp;
4389 }
4390 return stp;
4391 }
4392
4393 /*
4394 * In the 4.0 case we need to keep the owners around a little while to handle
4395 * CLOSE replay. We still do need to release any file access that is held by
4396 * them before returning however.
4397 */
4398 static void
4399 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4400 {
4401 struct nfs4_ol_stateid *last;
4402 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4403 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4404 nfsd_net_id);
4405
4406 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4407
4408 /*
4409 * We know that we hold one reference via nfsd4_close, and another
4410 * "persistent" reference for the client. If the refcount is higher
4411 * than 2, then there are still calls in progress that are using this
4412 * stateid. We can't put the sc_file reference until they are finished.
4413 * Wait for the refcount to drop to 2. Since it has been unhashed,
4414 * there should be no danger of the refcount going back up again at
4415 * this point.
4416 */
4417 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4418
4419 release_all_access(s);
4420 if (s->st_stid.sc_file) {
4421 put_nfs4_file(s->st_stid.sc_file);
4422 s->st_stid.sc_file = NULL;
4423 }
4424
4425 spin_lock(&nn->client_lock);
4426 last = oo->oo_last_closed_stid;
4427 oo->oo_last_closed_stid = s;
4428 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4429 oo->oo_time = ktime_get_boottime_seconds();
4430 spin_unlock(&nn->client_lock);
4431 if (last)
4432 nfs4_put_stid(&last->st_stid);
4433 }
4434
4435 /* search file_hashtbl[] for file */
4436 static struct nfs4_file *
4437 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
4438 {
4439 struct nfs4_file *fp;
4440
4441 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4442 lockdep_is_held(&state_lock)) {
4443 if (fh_match(&fp->fi_fhandle, fh)) {
4444 if (refcount_inc_not_zero(&fp->fi_ref))
4445 return fp;
4446 }
4447 }
4448 return NULL;
4449 }
4450
4451 struct nfs4_file *
4452 find_file(struct knfsd_fh *fh)
4453 {
4454 struct nfs4_file *fp;
4455 unsigned int hashval = file_hashval(fh);
4456
4457 rcu_read_lock();
4458 fp = find_file_locked(fh, hashval);
4459 rcu_read_unlock();
4460 return fp;
4461 }
4462
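/*
 * Look up the nfs4_file for this filehandle, first locklessly under
 * RCU and then under state_lock; if it is still absent, insert the
 * caller's preallocated 'new' file.
 */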
4463 static struct nfs4_file *
4464 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
4465 {
4466 struct nfs4_file *fp;
4467 unsigned int hashval = file_hashval(fh);
4468
4469 rcu_read_lock();
4470 fp = find_file_locked(fh, hashval);
4471 rcu_read_unlock();
4472 if (fp)
4473 return fp;
4474
4475 spin_lock(&state_lock);
4476 fp = find_file_locked(fh, hashval);
4477 if (likely(fp == NULL)) {
4478 nfsd4_init_file(fh, hashval, new);
4479 fp = new;
4480 }
4481 spin_unlock(&state_lock);
4482
4483 return fp;
4484 }
4485
4486 /*
4487 * Called to check deny modes when a READ arrives with an all-zero
4488 * stateid, or a WRITE with an all-zero or all-ones stateid
4489 */
4490 static __be32
4491 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4492 {
4493 struct nfs4_file *fp;
4494 __be32 ret = nfs_ok;
4495
4496 	fp = find_file(&current_fh->fh_handle);
4497 if (!fp)
4498 return ret;
4499 /* Check for conflicting share reservations */
4500 spin_lock(&fp->fi_lock);
4501 if (fp->fi_share_deny & deny_type)
4502 ret = nfserr_locked;
4503 spin_unlock(&fp->fi_lock);
4504 put_nfs4_file(fp);
4505 return ret;
4506 }
4507
4508 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4509 {
4510 struct nfs4_delegation *dp = cb_to_delegation(cb);
4511 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4512 nfsd_net_id);
4513
4514 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4515
4516 /*
4517 * We can't do this in nfsd_break_deleg_cb because it is
4518 * already holding inode->i_lock.
4519 *
4520 * If the dl_time != 0, then we know that it has already been
4521 * queued for a lease break. Don't queue it again.
4522 */
4523 spin_lock(&state_lock);
4524 if (delegation_hashed(dp) && dp->dl_time == 0) {
4525 dp->dl_time = ktime_get_boottime_seconds();
4526 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4527 }
4528 spin_unlock(&state_lock);
4529 }
4530
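/*
 * Completion of a CB_RECALL RPC. Returns 1 to end the callback or 0 to
 * have it retried after a delay; BAD_STATEID and BADHANDLE may just
 * mean the client hasn't processed the open reply that granted the
 * delegation yet, so retry a few times before giving up.
 */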
4531 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4532 struct rpc_task *task)
4533 {
4534 struct nfs4_delegation *dp = cb_to_delegation(cb);
4535
4536 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4537 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4538 return 1;
4539
4540 switch (task->tk_status) {
4541 case 0:
4542 return 1;
4543 case -NFS4ERR_DELAY:
4544 rpc_delay(task, 2 * HZ);
4545 return 0;
4546 case -EBADHANDLE:
4547 case -NFS4ERR_BAD_STATEID:
4548 /*
4549 * Race: client probably got cb_recall before open reply
4550 * granting delegation.
4551 */
4552 if (dp->dl_retries--) {
4553 rpc_delay(task, 2 * HZ);
4554 return 0;
4555 }
4556 fallthrough;
4557 default:
4558 return 1;
4559 }
4560 }
4561
4562 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4563 {
4564 struct nfs4_delegation *dp = cb_to_delegation(cb);
4565
4566 nfs4_put_stid(&dp->dl_stid);
4567 }
4568
4569 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4570 .prepare = nfsd4_cb_recall_prepare,
4571 .done = nfsd4_cb_recall_done,
4572 .release = nfsd4_cb_recall_release,
4573 };
4574
4575 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4576 {
4577 /*
4578 * We're assuming the state code never drops its reference
4579 * without first removing the lease. Since we're in this lease
4580 * callback (and since the lease code is serialized by the
4581 * i_lock) we know the server hasn't removed the lease yet, and
4582 * we know it's safe to take a reference.
4583 */
4584 refcount_inc(&dp->dl_stid.sc_count);
4585 nfsd4_run_cb(&dp->dl_recall);
4586 }
4587
4588 /* Called from break_lease() with i_lock held. */
4589 static bool
4590 nfsd_break_deleg_cb(struct file_lock *fl)
4591 {
4592 bool ret = false;
4593 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4594 struct nfs4_file *fp = dp->dl_stid.sc_file;
4595
4596 trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
4597
4598 /*
4599 	 * We don't want the locks code to time out the lease for us;
4600 	 * we'll remove it ourselves if a delegation isn't returned
4601 	 * in time:
4602 */
4603 fl->fl_break_time = 0;
4604
4605 spin_lock(&fp->fi_lock);
4606 fp->fi_had_conflict = true;
4607 nfsd_break_one_deleg(dp);
4608 spin_unlock(&fp->fi_lock);
4609 return ret;
4610 }
4611
4612 /**
4613 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
4614 * @fl: Lock state to check
4615 *
4616 * Return values:
4617 * %true: Lease conflict was resolved
4618 * %false: Lease conflict was not resolved.
4619 */
4620 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4621 {
4622 struct nfs4_delegation *dl = fl->fl_owner;
4623 struct svc_rqst *rqst;
4624 struct nfs4_client *clp;
4625
4626 if (!i_am_nfsd())
4627 return false;
4628 rqst = kthread_data(current);
4629 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4630 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4631 return false;
4632 clp = *(rqst->rq_lease_breaker);
4633 return dl->dl_stid.sc_client == clp;
4634 }
4635
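/*
 * A delegation's lease may only be removed (F_UNLCK); every other
 * modification is refused with -EAGAIN.
 */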
4636 static int
4637 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4638 struct list_head *dispose)
4639 {
4640 if (arg & F_UNLCK)
4641 return lease_modify(onlist, arg, dispose);
4642 else
4643 return -EAGAIN;
4644 }
4645
4646 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4647 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4648 .lm_break = nfsd_break_deleg_cb,
4649 .lm_change = nfsd_change_deleg_cb,
4650 };
4651
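/*
 * NFSv4.0 seqid check: one less than the stateowner's current seqid
 * indicates a replay of its last request, the current value is the
 * next request in order, and anything else is a bad seqid. Sessions
 * (v4.1+) make per-owner seqids irrelevant.
 */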
4652 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4653 {
4654 if (nfsd4_has_session(cstate))
4655 return nfs_ok;
4656 if (seqid == so->so_seqid - 1)
4657 return nfserr_replay_me;
4658 if (seqid == so->so_seqid)
4659 return nfs_ok;
4660 return nfserr_bad_seqid;
4661 }
4662
4663 static __be32 lookup_clientid(clientid_t *clid,
4664 struct nfsd4_compound_state *cstate,
4665 struct nfsd_net *nn,
4666 bool sessions)
4667 {
4668 struct nfs4_client *found;
4669
4670 if (cstate->clp) {
4671 found = cstate->clp;
4672 if (!same_clid(&found->cl_clientid, clid))
4673 return nfserr_stale_clientid;
4674 return nfs_ok;
4675 }
4676
4677 if (STALE_CLIENTID(clid, nn))
4678 return nfserr_stale_clientid;
4679
4680 /*
4681 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4682 	 * cached already then we know this is for v4.0 and "sessions"
4683 * will be false.
4684 */
4685 WARN_ON_ONCE(cstate->session);
4686 spin_lock(&nn->client_lock);
4687 found = find_confirmed_client(clid, sessions, nn);
4688 if (!found) {
4689 spin_unlock(&nn->client_lock);
4690 return nfserr_expired;
4691 }
4692 atomic_inc(&found->cl_rpc_users);
4693 spin_unlock(&nn->client_lock);
4694
4695 /* Cache the nfs4_client in cstate! */
4696 cstate->clp = found;
4697 return nfs_ok;
4698 }
4699
4700 __be32
4701 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4702 struct nfsd4_open *open, struct nfsd_net *nn)
4703 {
4704 clientid_t *clientid = &open->op_clientid;
4705 struct nfs4_client *clp = NULL;
4706 unsigned int strhashval;
4707 struct nfs4_openowner *oo = NULL;
4708 __be32 status;
4709
4710 if (STALE_CLIENTID(&open->op_clientid, nn))
4711 return nfserr_stale_clientid;
4712 /*
4713 * In case we need it later, after we've already created the
4714 * file and don't want to risk a further failure:
4715 */
4716 open->op_file = nfsd4_alloc_file();
4717 if (open->op_file == NULL)
4718 return nfserr_jukebox;
4719
4720 status = lookup_clientid(clientid, cstate, nn, false);
4721 if (status)
4722 return status;
4723 clp = cstate->clp;
4724
4725 strhashval = ownerstr_hashval(&open->op_owner);
4726 oo = find_openstateowner_str(strhashval, open, clp);
4727 open->op_openowner = oo;
4728 if (!oo) {
4729 goto new_owner;
4730 }
4731 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4732 /* Replace unconfirmed owners without checking for replay. */
4733 release_openowner(oo);
4734 open->op_openowner = NULL;
4735 goto new_owner;
4736 }
4737 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4738 if (status)
4739 return status;
4740 goto alloc_stateid;
4741 new_owner:
4742 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4743 if (oo == NULL)
4744 return nfserr_jukebox;
4745 open->op_openowner = oo;
4746 alloc_stateid:
4747 open->op_stp = nfs4_alloc_open_stateid(clp);
4748 if (!open->op_stp)
4749 return nfserr_jukebox;
4750
4751 if (nfsd4_has_session(cstate) &&
4752 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4753 open->op_odstate = alloc_clnt_odstate(clp);
4754 if (!open->op_odstate)
4755 return nfserr_jukebox;
4756 }
4757
4758 return nfs_ok;
4759 }
4760
4761 static inline __be32
4762 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4763 {
4764 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4765 return nfserr_openmode;
4766 else
4767 return nfs_ok;
4768 }
4769
4770 static int share_access_to_flags(u32 share_access)
4771 {
4772 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4773 }
4774
4775 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4776 {
4777 struct nfs4_stid *ret;
4778
4779 ret = find_stateid_by_type(cl, s,
4780 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4781 if (!ret)
4782 return NULL;
4783 return delegstateid(ret);
4784 }
4785
4786 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4787 {
4788 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4789 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4790 }
4791
4792 static __be32
4793 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4794 struct nfs4_delegation **dp)
4795 {
4796 int flags;
4797 __be32 status = nfserr_bad_stateid;
4798 struct nfs4_delegation *deleg;
4799
4800 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4801 if (deleg == NULL)
4802 goto out;
4803 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4804 nfs4_put_stid(&deleg->dl_stid);
4805 if (cl->cl_minorversion)
4806 status = nfserr_deleg_revoked;
4807 goto out;
4808 }
4809 flags = share_access_to_flags(open->op_share_access);
4810 status = nfs4_check_delegmode(deleg, flags);
4811 if (status) {
4812 nfs4_put_stid(&deleg->dl_stid);
4813 goto out;
4814 }
4815 *dp = deleg;
4816 out:
4817 if (!nfsd4_is_deleg_cur(open))
4818 return nfs_ok;
4819 if (status)
4820 return status;
4821 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4822 return nfs_ok;
4823 }
4824
4825 static inline int nfs4_access_to_access(u32 nfs4_access)
4826 {
4827 int flags = 0;
4828
4829 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4830 flags |= NFSD_MAY_READ;
4831 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4832 flags |= NFSD_MAY_WRITE;
4833 return flags;
4834 }
4835
4836 static inline __be32
4837 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4838 struct nfsd4_open *open)
4839 {
4840 struct iattr iattr = {
4841 .ia_valid = ATTR_SIZE,
4842 .ia_size = 0,
4843 };
4844 if (!open->op_truncate)
4845 return 0;
4846 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4847 return nfserr_inval;
4848 return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
4849 }
4850
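/*
 * Record the requested access and deny modes in both the stateid and
 * the nfs4_file, open a struct nfsd_file for the access mode if the
 * file lacks one, then break any conflicting lease and apply the
 * requested truncation. The bitmap changes are rolled back on failure.
 */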
4851 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4852 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4853 struct nfsd4_open *open)
4854 {
4855 struct nfsd_file *nf = NULL;
4856 __be32 status;
4857 int oflag = nfs4_access_to_omode(open->op_share_access);
4858 int access = nfs4_access_to_access(open->op_share_access);
4859 unsigned char old_access_bmap, old_deny_bmap;
4860
4861 spin_lock(&fp->fi_lock);
4862
4863 /*
4864 * Are we trying to set a deny mode that would conflict with
4865 * current access?
4866 */
4867 status = nfs4_file_check_deny(fp, open->op_share_deny);
4868 if (status != nfs_ok) {
4869 spin_unlock(&fp->fi_lock);
4870 goto out;
4871 }
4872
4873 /* set access to the file */
4874 status = nfs4_file_get_access(fp, open->op_share_access);
4875 if (status != nfs_ok) {
4876 spin_unlock(&fp->fi_lock);
4877 goto out;
4878 }
4879
4880 /* Set access bits in stateid */
4881 old_access_bmap = stp->st_access_bmap;
4882 set_access(open->op_share_access, stp);
4883
4884 /* Set new deny mask */
4885 old_deny_bmap = stp->st_deny_bmap;
4886 set_deny(open->op_share_deny, stp);
4887 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4888
4889 if (!fp->fi_fds[oflag]) {
4890 spin_unlock(&fp->fi_lock);
4891 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
4892 if (status)
4893 goto out_put_access;
4894 spin_lock(&fp->fi_lock);
4895 if (!fp->fi_fds[oflag]) {
4896 fp->fi_fds[oflag] = nf;
4897 nf = NULL;
4898 }
4899 }
4900 spin_unlock(&fp->fi_lock);
4901 if (nf)
4902 nfsd_file_put(nf);
4903
4904 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
4905 access));
4906 if (status)
4907 goto out_put_access;
4908
4909 status = nfsd4_truncate(rqstp, cur_fh, open);
4910 if (status)
4911 goto out_put_access;
4912 out:
4913 return status;
4914 out_put_access:
4915 stp->st_access_bmap = old_access_bmap;
4916 nfs4_file_put_access(fp, open->op_share_access);
4917 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4918 goto out;
4919 }
4920
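/*
 * OPEN upgrade: add newly requested access and deny bits to an
 * existing open stateid, undoing the deny change if the requested
 * truncation fails.
 */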
4921 static __be32
4922 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4923 {
4924 __be32 status;
4925 unsigned char old_deny_bmap = stp->st_deny_bmap;
4926
4927 if (!test_access(open->op_share_access, stp))
4928 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4929
4930 /* test and set deny mode */
4931 spin_lock(&fp->fi_lock);
4932 status = nfs4_file_check_deny(fp, open->op_share_deny);
4933 if (status == nfs_ok) {
4934 set_deny(open->op_share_deny, stp);
4935 fp->fi_share_deny |=
4936 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4937 }
4938 spin_unlock(&fp->fi_lock);
4939
4940 if (status != nfs_ok)
4941 return status;
4942
4943 status = nfsd4_truncate(rqstp, cur_fh, open);
4944 if (status != nfs_ok)
4945 reset_union_bmap_deny(old_deny_bmap, stp);
4946 return status;
4947 }
4948
4949 /* Should we give out recallable state?: */
4950 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4951 {
4952 if (clp->cl_cb_state == NFSD4_CB_UP)
4953 return true;
4954 /*
4955 * In the sessions case, since we don't have to establish a
4956 * separate connection for callbacks, we assume it's OK
4957 * until we hear otherwise:
4958 */
4959 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4960 }
4961
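/*
 * Build the FL_DELEG file_lock that represents a delegation to the
 * VFS lease machinery; read delegations are expressed as F_RDLCK
 * leases.
 */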
4962 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4963 int flag)
4964 {
4965 struct file_lock *fl;
4966
4967 fl = locks_alloc_lock();
4968 if (!fl)
4969 return NULL;
4970 fl->fl_lmops = &nfsd_lease_mng_ops;
4971 fl->fl_flags = FL_DELEG;
4972 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4973 fl->fl_end = OFFSET_MAX;
4974 fl->fl_owner = (fl_owner_t)dp;
4975 fl->fl_pid = current->tgid;
4976 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
4977 return fl;
4978 }
4979
4980 static struct nfs4_delegation *
4981 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4982 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4983 {
4984 int status = 0;
4985 struct nfs4_delegation *dp;
4986 struct nfsd_file *nf;
4987 struct file_lock *fl;
4988
4989 /*
4990 	 * The fi_had_conflict and nfs4_delegation_exists checks
4991 * here are just optimizations; we'll need to recheck them at
4992 * the end:
4993 */
4994 if (fp->fi_had_conflict)
4995 return ERR_PTR(-EAGAIN);
4996
4997 nf = find_readable_file(fp);
4998 if (!nf) {
4999 /* We should always have a readable file here */
5000 WARN_ON_ONCE(1);
5001 return ERR_PTR(-EBADF);
5002 }
5003 spin_lock(&state_lock);
5004 spin_lock(&fp->fi_lock);
5005 if (nfs4_delegation_exists(clp, fp))
5006 status = -EAGAIN;
5007 else if (!fp->fi_deleg_file) {
5008 fp->fi_deleg_file = nf;
5009 /* increment early to prevent fi_deleg_file from being
5010 * cleared */
5011 fp->fi_delegees = 1;
5012 nf = NULL;
5013 } else
5014 fp->fi_delegees++;
5015 spin_unlock(&fp->fi_lock);
5016 spin_unlock(&state_lock);
5017 if (nf)
5018 nfsd_file_put(nf);
5019 if (status)
5020 return ERR_PTR(status);
5021
5022 status = -ENOMEM;
5023 dp = alloc_init_deleg(clp, fp, fh, odstate);
5024 if (!dp)
5025 goto out_delegees;
5026
5027 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5028 if (!fl)
5029 goto out_clnt_odstate;
5030
5031 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5032 if (fl)
5033 locks_free_lock(fl);
5034 if (status)
5035 goto out_clnt_odstate;
5036
5037 spin_lock(&state_lock);
5038 spin_lock(&fp->fi_lock);
5039 if (fp->fi_had_conflict)
5040 status = -EAGAIN;
5041 else
5042 status = hash_delegation_locked(dp, fp);
5043 spin_unlock(&fp->fi_lock);
5044 spin_unlock(&state_lock);
5045
5046 if (status)
5047 goto out_unlock;
5048
5049 return dp;
5050 out_unlock:
5051 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5052 out_clnt_odstate:
5053 put_clnt_odstate(dp->dl_clnt_odstate);
5054 nfs4_put_stid(&dp->dl_stid);
5055 out_delegees:
5056 put_deleg_file(fp);
5057 return ERR_PTR(status);
5058 }
5059
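/*
 * Tell an NFSv4.1 client that asked for a delegation why it didn't
 * get one: contention (-EAGAIN), an explicit "cancel" want, or lack
 * of resources.
 */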
5060 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5061 {
5062 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5063 if (status == -EAGAIN)
5064 open->op_why_no_deleg = WND4_CONTENTION;
5065 else {
5066 open->op_why_no_deleg = WND4_RESOURCE;
5067 switch (open->op_deleg_want) {
5068 case NFS4_SHARE_WANT_READ_DELEG:
5069 case NFS4_SHARE_WANT_WRITE_DELEG:
5070 case NFS4_SHARE_WANT_ANY_DELEG:
5071 break;
5072 case NFS4_SHARE_WANT_CANCEL:
5073 open->op_why_no_deleg = WND4_CANCELLED;
5074 break;
5075 case NFS4_SHARE_WANT_NO_DELEG:
5076 WARN_ON_ONCE(1);
5077 }
5078 }
5079 }
5080
5081 /*
5082 * Attempt to hand out a delegation.
5083 *
5084 * Note we don't support write delegations, and won't until the vfs has
5085 * proper support for them.
5086 */
5087 static void
5088 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
5089 struct nfs4_ol_stateid *stp)
5090 {
5091 struct nfs4_delegation *dp;
5092 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5093 struct nfs4_client *clp = stp->st_stid.sc_client;
5094 int cb_up;
5095 int status = 0;
5096
5097 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5098 open->op_recall = 0;
5099 switch (open->op_claim_type) {
5100 case NFS4_OPEN_CLAIM_PREVIOUS:
5101 if (!cb_up)
5102 open->op_recall = 1;
5103 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5104 goto out_no_deleg;
5105 break;
5106 case NFS4_OPEN_CLAIM_NULL:
5107 case NFS4_OPEN_CLAIM_FH:
5108 /*
5109 * Let's not give out any delegations till everyone's
5110 * had the chance to reclaim theirs, *and* until
5111 * NLM locks have all been reclaimed:
5112 */
5113 if (locks_in_grace(clp->net))
5114 goto out_no_deleg;
5115 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5116 goto out_no_deleg;
5117 /*
5118 * Also, if the file was opened for write or
5119 * create, there's a good chance the client's
5120 * about to write to it, resulting in an
5121 * immediate recall (since we don't support
5122 * write delegations):
5123 */
5124 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
5125 goto out_no_deleg;
5126 if (open->op_create == NFS4_OPEN_CREATE)
5127 goto out_no_deleg;
5128 break;
5129 default:
5130 goto out_no_deleg;
5131 }
5132 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5133 if (IS_ERR(dp))
5134 goto out_no_deleg;
5135
5136 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5137
5138 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5139 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5140 nfs4_put_stid(&dp->dl_stid);
5141 return;
5142 out_no_deleg:
5143 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5144 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5145 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5146 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5147 open->op_recall = 1;
5148 }
5149
5150 /* 4.1 client asking for a delegation? */
5151 if (open->op_deleg_want)
5152 nfsd4_open_deleg_none_ext(open, status);
5153 return;
5154 }
5155
5156 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5157 struct nfs4_delegation *dp)
5158 {
5159 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5160 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5161 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5162 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5163 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5164 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5165 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5166 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5167 }
5168 /* Otherwise the client must be confused wanting a delegation
5169 * it already has, therefore we don't return
5170 	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
5171 */
5172 }
5173
5174 __be32
5175 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5176 {
5177 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5178 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5179 struct nfs4_file *fp = NULL;
5180 struct nfs4_ol_stateid *stp = NULL;
5181 struct nfs4_delegation *dp = NULL;
5182 __be32 status;
5183 bool new_stp = false;
5184
5185 /*
5186 * Lookup file; if found, lookup stateid and check open request,
5187 * and check for delegations in the process of being recalled.
5188 * If not found, create the nfs4_file struct
5189 */
5190 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
5191 if (fp != open->op_file) {
5192 status = nfs4_check_deleg(cl, open, &dp);
5193 if (status)
5194 goto out;
5195 stp = nfsd4_find_and_lock_existing_open(fp, open);
5196 } else {
5197 open->op_file = NULL;
5198 status = nfserr_bad_stateid;
5199 if (nfsd4_is_deleg_cur(open))
5200 goto out;
5201 }
5202
5203 if (!stp) {
5204 stp = init_open_stateid(fp, open);
5205 if (!open->op_stp)
5206 new_stp = true;
5207 }
5208
5209 /*
5210 * OPEN the file, or upgrade an existing OPEN.
5211 * If truncate fails, the OPEN fails.
5212 *
5213 * stp is already locked.
5214 */
5215 if (!new_stp) {
5216 /* Stateid was found, this is an OPEN upgrade */
5217 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5218 if (status) {
5219 mutex_unlock(&stp->st_mutex);
5220 goto out;
5221 }
5222 } else {
5223 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5224 if (status) {
5225 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5226 release_open_stateid(stp);
5227 mutex_unlock(&stp->st_mutex);
5228 goto out;
5229 }
5230
5231 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5232 open->op_odstate);
5233 if (stp->st_clnt_odstate == open->op_odstate)
5234 open->op_odstate = NULL;
5235 }
5236
5237 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5238 mutex_unlock(&stp->st_mutex);
5239
5240 if (nfsd4_has_session(&resp->cstate)) {
5241 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5242 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5243 open->op_why_no_deleg = WND4_NOT_WANTED;
5244 goto nodeleg;
5245 }
5246 }
5247
5248 /*
5249 * Attempt to hand out a delegation. No error return, because the
5250 * OPEN succeeds even if we fail.
5251 */
5252 nfs4_open_delegation(current_fh, open, stp);
5253 nodeleg:
5254 status = nfs_ok;
5255 trace_nfsd_open(&stp->st_stid.sc_stateid);
5256 out:
5257 /* 4.1 client trying to upgrade/downgrade delegation? */
5258 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5259 open->op_deleg_want)
5260 nfsd4_deleg_xgrade_none_ext(open, dp);
5261
5262 if (fp)
5263 put_nfs4_file(fp);
5264 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5265 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5266 /*
5267 * To finish the open response, we just need to set the rflags.
5268 */
5269 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5270 if (nfsd4_has_session(&resp->cstate))
5271 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5272 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5273 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5274
5275 if (dp)
5276 nfs4_put_stid(&dp->dl_stid);
5277 if (stp)
5278 nfs4_put_stid(&stp->st_stid);
5279
5280 return status;
5281 }
5282
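/*
 * Release whatever OPEN processing did not consume: stash the openowner
 * in cstate for seqid replay before dropping our reference, and free any
 * preallocated file, stateid, and open/deny state that went unused.
 */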
5283 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5284 struct nfsd4_open *open)
5285 {
5286 if (open->op_openowner) {
5287 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5288
5289 nfsd4_cstate_assign_replay(cstate, so);
5290 nfs4_put_stateowner(so);
5291 }
5292 if (open->op_file)
5293 kmem_cache_free(file_slab, open->op_file);
5294 if (open->op_stp)
5295 nfs4_put_stid(&open->op_stp->st_stid);
5296 if (open->op_odstate)
5297 kmem_cache_free(odstate_slab, open->op_odstate);
5298 }
5299
5300 __be32
5301 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5302 union nfsd4_op_u *u)
5303 {
5304 clientid_t *clid = &u->renew;
5305 struct nfs4_client *clp;
5306 __be32 status;
5307 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5308
5309 trace_nfsd_clid_renew(clid);
5310 status = lookup_clientid(clid, cstate, nn, false);
5311 if (status)
5312 goto out;
5313 clp = cstate->clp;
5314 status = nfserr_cb_path_down;
5315 if (!list_empty(&clp->cl_delegations)
5316 && clp->cl_cb_state != NFSD4_CB_UP)
5317 goto out;
5318 status = nfs_ok;
5319 out:
5320 return status;
5321 }
5322
5323 void
5324 nfsd4_end_grace(struct nfsd_net *nn)
5325 {
5326 /* do nothing if grace period already ended */
5327 if (nn->grace_ended)
5328 return;
5329
5330 trace_nfsd_grace_complete(nn);
5331 nn->grace_ended = true;
5332 /*
5333 * If the server goes down again right now, an NFSv4
5334 * client will still be allowed to reclaim after it comes back up,
5335 * even if it hasn't yet had a chance to reclaim state this time.
5336 *
5337 */
5338 nfsd4_record_grace_done(nn);
5339 /*
5340 * At this point, NFSv4 clients can still reclaim. But if the
5341 * server crashes, any that have not yet reclaimed will be out
5342 * of luck on the next boot.
5343 *
5344 * (NFSv4.1+ clients are considered to have reclaimed once they
5345 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5346 * have reclaimed after their first OPEN.)
5347 */
5348 locks_end_grace(&nn->nfsd4_manager);
5349 /*
5350 * At this point, and once lockd and/or any other containers
5351 * exit their grace period, further reclaims will fail and
5352 * regular locking can resume.
5353 */
5354 }
5355
5356 /*
5357 * If we've waited a lease period but there are still clients trying to
5358 * reclaim, wait a little longer to give them a chance to finish.
5359 */
5360 static bool clients_still_reclaiming(struct nfsd_net *nn)
5361 {
5362 time64_t double_grace_period_end = nn->boot_time +
5363 2 * nn->nfsd4_lease;
5364
5365 if (nn->track_reclaim_completes &&
5366 atomic_read(&nn->nr_reclaim_complete) ==
5367 nn->reclaim_str_hashtbl_size)
5368 return false;
5369 if (!nn->somebody_reclaimed)
5370 return false;
5371 nn->somebody_reclaimed = false;
5372 /*
5373 * If we've given them *two* lease times to reclaim, and they're
5374 * still not done, give up:
5375 */
5376 if (ktime_get_boottime_seconds() > double_grace_period_end)
5377 return false;
5378 return true;
5379 }
5380
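/*
 * The "laundromat" reaps state whose lease time has run out: stale
 * copy-notify stateids, expired clients, delegations due for revocation,
 * openowners parked on the close LRU, and timed-out blocked-lock
 * requests. It returns the number of seconds until it should run again.
 */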
5381 static time64_t
5382 nfs4_laundromat(struct nfsd_net *nn)
5383 {
5384 struct nfs4_client *clp;
5385 struct nfs4_openowner *oo;
5386 struct nfs4_delegation *dp;
5387 struct nfs4_ol_stateid *stp;
5388 struct nfsd4_blocked_lock *nbl;
5389 struct list_head *pos, *next, reaplist;
5390 time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
5391 time64_t t, new_timeo = nn->nfsd4_lease;
5392 struct nfs4_cpntf_state *cps;
5393 copy_stateid_t *cps_t;
5394 int i;
5395
5396 if (clients_still_reclaiming(nn)) {
5397 new_timeo = 0;
5398 goto out;
5399 }
5400 nfsd4_end_grace(nn);
5401 INIT_LIST_HEAD(&reaplist);
5402
5403 spin_lock(&nn->s2s_cp_lock);
5404 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5405 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5406 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5407 cps->cpntf_time < cutoff)
5408 _free_cpntf_state_locked(nn, cps);
5409 }
5410 spin_unlock(&nn->s2s_cp_lock);
5411
5412 spin_lock(&nn->client_lock);
5413 list_for_each_safe(pos, next, &nn->client_lru) {
5414 clp = list_entry(pos, struct nfs4_client, cl_lru);
5415 if (clp->cl_time > cutoff) {
5416 t = clp->cl_time - cutoff;
5417 new_timeo = min(new_timeo, t);
5418 break;
5419 }
5420 if (mark_client_expired_locked(clp)) {
5421 trace_nfsd_clid_expired(&clp->cl_clientid);
5422 continue;
5423 }
5424 list_add(&clp->cl_lru, &reaplist);
5425 }
5426 spin_unlock(&nn->client_lock);
5427 list_for_each_safe(pos, next, &reaplist) {
5428 clp = list_entry(pos, struct nfs4_client, cl_lru);
5429 trace_nfsd_clid_purged(&clp->cl_clientid);
5430 list_del_init(&clp->cl_lru);
5431 expire_client(clp);
5432 }
5433 spin_lock(&state_lock);
5434 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5435 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
5436 if (dp->dl_time > cutoff) {
5437 t = dp->dl_time - cutoff;
5438 new_timeo = min(new_timeo, t);
5439 break;
5440 }
5441 WARN_ON(!unhash_delegation_locked(dp));
5442 list_add(&dp->dl_recall_lru, &reaplist);
5443 }
5444 spin_unlock(&state_lock);
5445 while (!list_empty(&reaplist)) {
5446 dp = list_first_entry(&reaplist, struct nfs4_delegation,
5447 dl_recall_lru);
5448 list_del_init(&dp->dl_recall_lru);
5449 revoke_delegation(dp);
5450 }
5451
5452 spin_lock(&nn->client_lock);
5453 while (!list_empty(&nn->close_lru)) {
5454 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5455 oo_close_lru);
5456 if (oo->oo_time > cutoff) {
5457 t = oo->oo_time - cutoff;
5458 new_timeo = min(new_timeo, t);
5459 break;
5460 }
5461 list_del_init(&oo->oo_close_lru);
5462 stp = oo->oo_last_closed_stid;
5463 oo->oo_last_closed_stid = NULL;
5464 spin_unlock(&nn->client_lock);
5465 nfs4_put_stid(&stp->st_stid);
5466 spin_lock(&nn->client_lock);
5467 }
5468 spin_unlock(&nn->client_lock);
5469
5470 /*
5471 * It's possible for a client to try and acquire an already held lock
5472 * that is being held for a long time, and then lose interest in it.
5473 * So, we clean out any un-revisited request after a lease period
5474 * under the assumption that the client is no longer interested.
5475 *
5476 * RFC5661, sec. 9.6 states that the client must not rely on getting
5477 * notifications and must continue to poll for locks, even when the
5478 * server supports them. Thus this shouldn't lead to clients blocking
5479 * indefinitely once the lock does become free.
5480 */
5481 BUG_ON(!list_empty(&reaplist));
5482 spin_lock(&nn->blocked_locks_lock);
5483 while (!list_empty(&nn->blocked_locks_lru)) {
5484 nbl = list_first_entry(&nn->blocked_locks_lru,
5485 struct nfsd4_blocked_lock, nbl_lru);
5486 if (nbl->nbl_time > cutoff) {
5487 t = nbl->nbl_time - cutoff;
5488 new_timeo = min(new_timeo, t);
5489 break;
5490 }
5491 list_move(&nbl->nbl_lru, &reaplist);
5492 list_del_init(&nbl->nbl_list);
5493 }
5494 spin_unlock(&nn->blocked_locks_lock);
5495
5496 while (!list_empty(&reaplist)) {
5497 nbl = list_first_entry(&reaplist,
5498 struct nfsd4_blocked_lock, nbl_lru);
5499 list_del_init(&nbl->nbl_lru);
5500 free_blocked_lock(nbl);
5501 }
5502 out:
5503 new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5504 return new_timeo;
5505 }
5506
5507 static struct workqueue_struct *laundry_wq;
5508 static void laundromat_main(struct work_struct *);
5509
5510 static void
5511 laundromat_main(struct work_struct *laundry)
5512 {
5513 time64_t t;
5514 struct delayed_work *dwork = to_delayed_work(laundry);
5515 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5516 laundromat_work);
5517
5518 t = nfs4_laundromat(nn);
5519 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
5520 }
5521
5522 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5523 {
5524 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5525 return nfserr_bad_stateid;
5526 return nfs_ok;
5527 }
5528
5529 static inline int
5530 access_permit_read(struct nfs4_ol_stateid *stp)
5531 {
5532 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
5533 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
5534 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
5535 }
5536
5537 static inline int
5538 access_permit_write(struct nfs4_ol_stateid *stp)
5539 {
5540 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
5541 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
5542 }
5543
5544 static
5545 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5546 {
5547 __be32 status = nfserr_openmode;
5548
5549 /* For lock stateids, we test the parent open, not the lock: */
5550 if (stp->st_openstp)
5551 stp = stp->st_openstp;
5552 if ((flags & WR_STATE) && !access_permit_write(stp))
5553 goto out;
5554 if ((flags & RD_STATE) && !access_permit_read(stp))
5555 goto out;
5556 status = nfs_ok;
5557 out:
5558 return status;
5559 }
5560
5561 static inline __be32
5562 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5563 {
5564 if (ONE_STATEID(stateid) && (flags & RD_STATE))
5565 return nfs_ok;
5566 else if (opens_in_grace(net)) {
5567 /* Answer in remaining cases depends on existence of
5568 * conflicting state; so we must wait out the grace period. */
5569 return nfserr_grace;
5570 } else if (flags & WR_STATE)
5571 return nfs4_share_conflict(current_fh,
5572 NFS4_SHARE_DENY_WRITE);
5573 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5574 return nfs4_share_conflict(current_fh,
5575 NFS4_SHARE_DENY_READ);
5576 }
5577
5578 /*
5579 * Allow READ/WRITE during grace period on recovered state only for files
5580 * that are not able to provide mandatory locking.
5581 */
5582 static inline int
5583 grace_disallows_io(struct net *net, struct inode *inode)
5584 {
5585 return opens_in_grace(net) && mandatory_lock(inode);
5586 }
5587
5588 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5589 {
5590 /*
5591 * When sessions are used the stateid generation number is ignored
5592 * when it is zero.
5593 */
5594 if (has_session && in->si_generation == 0)
5595 return nfs_ok;
5596
5597 if (in->si_generation == ref->si_generation)
5598 return nfs_ok;
5599
5600 /* If the client sends us a stateid from the future, it's buggy: */
5601 if (nfsd4_stateid_generation_after(in, ref))
5602 return nfserr_bad_stateid;
5603 /*
5604 * However, we could see a stateid from the past, even from a
5605 * non-buggy client. For example, if the client sends a lock
5606 * while some IO is outstanding, the lock may bump si_generation
5607 * while the IO is still in flight. The client could avoid that
5608 * situation by waiting for responses on all the IO requests,
5609 * but better performance may result in retrying IO that
5610 * receives an old_stateid error if requests are rarely
5611 * reordered in flight:
5612 */
5613 return nfserr_old_stateid;
5614 }
5615
5616 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5617 {
5618 __be32 ret;
5619
5620 spin_lock(&s->sc_lock);
5621 ret = nfsd4_verify_open_stid(s);
5622 if (ret == nfs_ok)
5623 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5624 spin_unlock(&s->sc_lock);
5625 return ret;
5626 }
5627
5628 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5629 {
5630 if (ols->st_stateowner->so_is_open_owner &&
5631 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5632 return nfserr_bad_stateid;
5633 return nfs_ok;
5634 }
5635
5636 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5637 {
5638 struct nfs4_stid *s;
5639 __be32 status = nfserr_bad_stateid;
5640
5641 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5642 CLOSE_STATEID(stateid))
5643 return status;
5644 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
5645 return status;
5646 spin_lock(&cl->cl_lock);
5647 s = find_stateid_locked(cl, stateid);
5648 if (!s)
5649 goto out_unlock;
5650 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5651 if (status)
5652 goto out_unlock;
5653 switch (s->sc_type) {
5654 case NFS4_DELEG_STID:
5655 status = nfs_ok;
5656 break;
5657 case NFS4_REVOKED_DELEG_STID:
5658 status = nfserr_deleg_revoked;
5659 break;
5660 case NFS4_OPEN_STID:
5661 case NFS4_LOCK_STID:
5662 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5663 break;
5664 default:
5665 printk("unknown stateid type %x\n", s->sc_type);
5666 fallthrough;
5667 case NFS4_CLOSED_STID:
5668 case NFS4_CLOSED_DELEG_STID:
5669 status = nfserr_bad_stateid;
5670 }
5671 out_unlock:
5672 spin_unlock(&cl->cl_lock);
5673 return status;
5674 }
5675
5676 __be32
5677 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5678 stateid_t *stateid, unsigned char typemask,
5679 struct nfs4_stid **s, struct nfsd_net *nn)
5680 {
5681 __be32 status;
5682 bool return_revoked = false;
5683
5684 /*
5685 * only return revoked delegations if explicitly asked.
5686 * otherwise we report revoked or bad_stateid status.
5687 */
5688 if (typemask & NFS4_REVOKED_DELEG_STID)
5689 return_revoked = true;
5690 else if (typemask & NFS4_DELEG_STID)
5691 typemask |= NFS4_REVOKED_DELEG_STID;
5692
5693 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5694 CLOSE_STATEID(stateid))
5695 return nfserr_bad_stateid;
5696 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
5697 false);
5698 if (status == nfserr_stale_clientid) {
5699 if (cstate->session)
5700 return nfserr_bad_stateid;
5701 return nfserr_stale_stateid;
5702 }
5703 if (status)
5704 return status;
5705 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5706 if (!*s)
5707 return nfserr_bad_stateid;
5708 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5709 nfs4_put_stid(*s);
5710 if (cstate->minorversion)
5711 return nfserr_deleg_revoked;
5712 return nfserr_bad_stateid;
5713 }
5714 return nfs_ok;
5715 }
5716
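/*
 * Pick the nfsd_file matching the stateid: for a delegation stateid, the
 * file cached when the delegation was granted; for open and lock
 * stateids, a readable or writeable file according to the I/O direction.
 */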
5717 static struct nfsd_file *
5718 nfs4_find_file(struct nfs4_stid *s, int flags)
5719 {
5720 if (!s)
5721 return NULL;
5722
5723 switch (s->sc_type) {
5724 case NFS4_DELEG_STID:
5725 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5726 return NULL;
5727 return nfsd_file_get(s->sc_file->fi_deleg_file);
5728 case NFS4_OPEN_STID:
5729 case NFS4_LOCK_STID:
5730 if (flags & RD_STATE)
5731 return find_readable_file(s->sc_file);
5732 else
5733 return find_writeable_file(s->sc_file);
5734 }
5735
5736 return NULL;
5737 }
5738
5739 static __be32
5740 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5741 {
5742 __be32 status;
5743
5744 status = nfsd4_check_openowner_confirmed(ols);
5745 if (status)
5746 return status;
5747 return nfs4_check_openmode(ols, flags);
5748 }
5749
5750 static __be32
5751 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5752 struct nfsd_file **nfp, int flags)
5753 {
5754 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5755 struct nfsd_file *nf;
5756 __be32 status;
5757
5758 nf = nfs4_find_file(s, flags);
5759 if (nf) {
5760 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5761 acc | NFSD_MAY_OWNER_OVERRIDE);
5762 if (status) {
5763 nfsd_file_put(nf);
5764 goto out;
5765 }
5766 } else {
5767 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5768 if (status)
5769 return status;
5770 }
5771 *nfp = nf;
5772 out:
5773 return status;
5774 }
5775 static void
5776 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5777 {
5778 WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
5779 if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
5780 return;
5781 list_del(&cps->cp_list);
5782 idr_remove(&nn->s2s_cp_stateids,
5783 cps->cp_stateid.stid.si_opaque.so_id);
5784 kfree(cps);
5785 }
5786 /*
5787 * A READ that is part of an inter-server-to-server COPY will carry a
5788 * copy stateid. Look up the copy-notify stateid in the
5789 * idr structure and take a reference on it.
5790 */
5791 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5792 struct nfs4_client *clp,
5793 struct nfs4_cpntf_state **cps)
5794 {
5795 copy_stateid_t *cps_t;
5796 struct nfs4_cpntf_state *state = NULL;
5797
5798 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
5799 return nfserr_bad_stateid;
5800 spin_lock(&nn->s2s_cp_lock);
5801 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
5802 if (cps_t) {
5803 state = container_of(cps_t, struct nfs4_cpntf_state,
5804 cp_stateid);
5805 if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
5806 state = NULL;
5807 goto unlock;
5808 }
5809 if (!clp)
5810 refcount_inc(&state->cp_stateid.sc_count);
5811 else
5812 _free_cpntf_state_locked(nn, state);
5813 }
5814 unlock:
5815 spin_unlock(&nn->s2s_cp_lock);
5816 if (!state)
5817 return nfserr_bad_stateid;
5818 if (!clp && state)
5819 *cps = state;
5820 return 0;
5821 }
5822
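/*
 * Resolve a copy-notify stateid back to the parent stateid (open, lock,
 * or delegation) it was derived from, refreshing its timestamp so the
 * laundromat does not reap it while the copy is in progress.
 */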
5823 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5824 struct nfs4_stid **stid)
5825 {
5826 __be32 status;
5827 struct nfs4_cpntf_state *cps = NULL;
5828 struct nfsd4_compound_state cstate;
5829
5830 status = manage_cpntf_state(nn, st, NULL, &cps);
5831 if (status)
5832 return status;
5833
5834 cps->cpntf_time = ktime_get_boottime_seconds();
5835 memset(&cstate, 0, sizeof(cstate));
5836 status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
5837 if (status)
5838 goto out;
5839 status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
5840 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5841 stid, nn);
5842 put_client_renew(cstate.clp);
5843 out:
5844 nfs4_put_cpntf_state(nn, cps);
5845 return status;
5846 }
5847
5848 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5849 {
5850 spin_lock(&nn->s2s_cp_lock);
5851 _free_cpntf_state_locked(nn, cps);
5852 spin_unlock(&nn->s2s_cp_lock);
5853 }
5854
5855 /*
5856 * Checks for stateid operations: validate the presented stateid and return the open file the I/O should use.
5857 */
5858 __be32
5859 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5860 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5861 stateid_t *stateid, int flags, struct nfsd_file **nfp,
5862 struct nfs4_stid **cstid)
5863 {
5864 struct inode *ino = d_inode(fhp->fh_dentry);
5865 struct net *net = SVC_NET(rqstp);
5866 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5867 struct nfs4_stid *s = NULL;
5868 __be32 status;
5869
5870 if (nfp)
5871 *nfp = NULL;
5872
5873 if (grace_disallows_io(net, ino))
5874 return nfserr_grace;
5875
5876 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5877 status = check_special_stateids(net, fhp, stateid, flags);
5878 goto done;
5879 }
5880
5881 status = nfsd4_lookup_stateid(cstate, stateid,
5882 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5883 &s, nn);
5884 if (status == nfserr_bad_stateid)
5885 status = find_cpntf_state(nn, stateid, &s);
5886 if (status)
5887 return status;
5888 status = nfsd4_stid_check_stateid_generation(stateid, s,
5889 nfsd4_has_session(cstate));
5890 if (status)
5891 goto out;
5892
5893 switch (s->sc_type) {
5894 case NFS4_DELEG_STID:
5895 status = nfs4_check_delegmode(delegstateid(s), flags);
5896 break;
5897 case NFS4_OPEN_STID:
5898 case NFS4_LOCK_STID:
5899 status = nfs4_check_olstateid(openlockstateid(s), flags);
5900 break;
5901 default:
5902 status = nfserr_bad_stateid;
5903 break;
5904 }
5905 if (status)
5906 goto out;
5907 status = nfs4_check_fh(fhp, s);
5908
5909 done:
5910 if (status == nfs_ok && nfp)
5911 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
5912 out:
5913 if (s) {
5914 if (!status && cstid)
5915 *cstid = s;
5916 else
5917 nfs4_put_stid(s);
5918 }
5919 return status;
5920 }
5921
5922 /*
5923 * Test if the stateid is valid
5924 */
5925 __be32
5926 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5927 union nfsd4_op_u *u)
5928 {
5929 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5930 struct nfsd4_test_stateid_id *stateid;
5931 struct nfs4_client *cl = cstate->session->se_client;
5932
5933 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5934 stateid->ts_id_status =
5935 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5936
5937 return nfs_ok;
5938 }
5939
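/*
 * A lock stateid may be freed only after the generation check passes and
 * only if its lockowner no longer holds locks on the file; otherwise
 * FREE_STATEID fails with nfserr_locks_held.
 */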
5940 static __be32
5941 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5942 {
5943 struct nfs4_ol_stateid *stp = openlockstateid(s);
5944 __be32 ret;
5945
5946 ret = nfsd4_lock_ol_stateid(stp);
5947 if (ret)
5948 goto out_put_stid;
5949
5950 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5951 if (ret)
5952 goto out;
5953
5954 ret = nfserr_locks_held;
5955 if (check_for_locks(stp->st_stid.sc_file,
5956 lockowner(stp->st_stateowner)))
5957 goto out;
5958
5959 release_lock_stateid(stp);
5960 ret = nfs_ok;
5961
5962 out:
5963 mutex_unlock(&stp->st_mutex);
5964 out_put_stid:
5965 nfs4_put_stid(s);
5966 return ret;
5967 }
5968
5969 __be32
5970 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5971 union nfsd4_op_u *u)
5972 {
5973 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5974 stateid_t *stateid = &free_stateid->fr_stateid;
5975 struct nfs4_stid *s;
5976 struct nfs4_delegation *dp;
5977 struct nfs4_client *cl = cstate->session->se_client;
5978 __be32 ret = nfserr_bad_stateid;
5979
5980 spin_lock(&cl->cl_lock);
5981 s = find_stateid_locked(cl, stateid);
5982 if (!s)
5983 goto out_unlock;
5984 spin_lock(&s->sc_lock);
5985 switch (s->sc_type) {
5986 case NFS4_DELEG_STID:
5987 ret = nfserr_locks_held;
5988 break;
5989 case NFS4_OPEN_STID:
5990 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5991 if (ret)
5992 break;
5993 ret = nfserr_locks_held;
5994 break;
5995 case NFS4_LOCK_STID:
5996 spin_unlock(&s->sc_lock);
5997 refcount_inc(&s->sc_count);
5998 spin_unlock(&cl->cl_lock);
5999 ret = nfsd4_free_lock_stateid(stateid, s);
6000 goto out;
6001 case NFS4_REVOKED_DELEG_STID:
6002 spin_unlock(&s->sc_lock);
6003 dp = delegstateid(s);
6004 list_del_init(&dp->dl_recall_lru);
6005 spin_unlock(&cl->cl_lock);
6006 nfs4_put_stid(s);
6007 ret = nfs_ok;
6008 goto out;
6009 /* Default falls through and returns nfserr_bad_stateid */
6010 }
6011 spin_unlock(&s->sc_lock);
6012 out_unlock:
6013 spin_unlock(&cl->cl_lock);
6014 out:
6015 return ret;
6016 }
6017
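/*
 * Map an NFSv4 lock type to the RD_STATE/WR_STATE flag used by
 * nfs4_check_openmode(): read locks need read access, write locks need
 * write access.
 */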
6018 static inline int
6019 setlkflg(int type)
6020 {
6021 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6022 RD_STATE : WR_STATE;
6023 }
6024
6025 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6026 {
6027 struct svc_fh *current_fh = &cstate->current_fh;
6028 struct nfs4_stateowner *sop = stp->st_stateowner;
6029 __be32 status;
6030
6031 status = nfsd4_check_seqid(cstate, sop, seqid);
6032 if (status)
6033 return status;
6034 status = nfsd4_lock_ol_stateid(stp);
6035 if (status != nfs_ok)
6036 return status;
6037 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6038 if (status == nfs_ok)
6039 status = nfs4_check_fh(current_fh, &stp->st_stid);
6040 if (status != nfs_ok)
6041 mutex_unlock(&stp->st_mutex);
6042 return status;
6043 }
6044
6045 /*
6046 * Checks for sequence id mutating operations.
6047 */
6048 static __be32
6049 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6050 stateid_t *stateid, char typemask,
6051 struct nfs4_ol_stateid **stpp,
6052 struct nfsd_net *nn)
6053 {
6054 __be32 status;
6055 struct nfs4_stid *s;
6056 struct nfs4_ol_stateid *stp = NULL;
6057
6058 trace_nfsd_preprocess(seqid, stateid);
6059
6060 *stpp = NULL;
6061 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6062 if (status)
6063 return status;
6064 stp = openlockstateid(s);
6065 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6066
6067 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6068 if (!status)
6069 *stpp = stp;
6070 else
6071 nfs4_put_stid(&stp->st_stid);
6072 return status;
6073 }
6074
6075 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6076 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6077 {
6078 __be32 status;
6079 struct nfs4_openowner *oo;
6080 struct nfs4_ol_stateid *stp;
6081
6082 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6083 NFS4_OPEN_STID, &stp, nn);
6084 if (status)
6085 return status;
6086 oo = openowner(stp->st_stateowner);
6087 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6088 mutex_unlock(&stp->st_mutex);
6089 nfs4_put_stid(&stp->st_stid);
6090 return nfserr_bad_stateid;
6091 }
6092 *stpp = stp;
6093 return nfs_ok;
6094 }
6095
6096 __be32
6097 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6098 union nfsd4_op_u *u)
6099 {
6100 struct nfsd4_open_confirm *oc = &u->open_confirm;
6101 __be32 status;
6102 struct nfs4_openowner *oo;
6103 struct nfs4_ol_stateid *stp;
6104 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6105
6106 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6107 cstate->current_fh.fh_dentry);
6108
6109 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6110 if (status)
6111 return status;
6112
6113 status = nfs4_preprocess_seqid_op(cstate,
6114 oc->oc_seqid, &oc->oc_req_stateid,
6115 NFS4_OPEN_STID, &stp, nn);
6116 if (status)
6117 goto out;
6118 oo = openowner(stp->st_stateowner);
6119 status = nfserr_bad_stateid;
6120 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6121 mutex_unlock(&stp->st_mutex);
6122 goto put_stateid;
6123 }
6124 oo->oo_flags |= NFS4_OO_CONFIRMED;
6125 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6126 mutex_unlock(&stp->st_mutex);
6127 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6128 nfsd4_client_record_create(oo->oo_owner.so_client);
6129 status = nfs_ok;
6130 put_stateid:
6131 nfs4_put_stid(&stp->st_stid);
6132 out:
6133 nfsd4_bump_seqid(cstate, status);
6134 return status;
6135 }
6136
6137 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6138 {
6139 if (!test_access(access, stp))
6140 return;
6141 nfs4_file_put_access(stp->st_stid.sc_file, access);
6142 clear_access(access, stp);
6143 }
6144
6145 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6146 {
6147 switch (to_access) {
6148 case NFS4_SHARE_ACCESS_READ:
6149 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6150 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6151 break;
6152 case NFS4_SHARE_ACCESS_WRITE:
6153 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6154 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6155 break;
6156 case NFS4_SHARE_ACCESS_BOTH:
6157 break;
6158 default:
6159 WARN_ON_ONCE(1);
6160 }
6161 }
6162
6163 __be32
6164 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6165 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6166 {
6167 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6168 __be32 status;
6169 struct nfs4_ol_stateid *stp;
6170 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6171
6172 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6173 cstate->current_fh.fh_dentry);
6174
6175 /* We don't yet support WANT bits: */
6176 if (od->od_deleg_want)
6177 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6178 od->od_deleg_want);
6179
6180 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6181 &od->od_stateid, &stp, nn);
6182 if (status)
6183 goto out;
6184 status = nfserr_inval;
6185 if (!test_access(od->od_share_access, stp)) {
6186 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6187 stp->st_access_bmap, od->od_share_access);
6188 goto put_stateid;
6189 }
6190 if (!test_deny(od->od_share_deny, stp)) {
6191 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6192 stp->st_deny_bmap, od->od_share_deny);
6193 goto put_stateid;
6194 }
6195 nfs4_stateid_downgrade(stp, od->od_share_access);
6196 reset_union_bmap_deny(od->od_share_deny, stp);
6197 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6198 status = nfs_ok;
6199 put_stateid:
6200 mutex_unlock(&stp->st_mutex);
6201 nfs4_put_stid(&stp->st_stid);
6202 out:
6203 nfsd4_bump_seqid(cstate, status);
6204 return status;
6205 }
6206
6207 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6208 {
6209 struct nfs4_client *clp = s->st_stid.sc_client;
6210 bool unhashed;
6211 LIST_HEAD(reaplist);
6212 struct nfs4_ol_stateid *stp;
6213
6214 spin_lock(&clp->cl_lock);
6215 unhashed = unhash_open_stateid(s, &reaplist);
6216
6217 if (clp->cl_minorversion) {
6218 if (unhashed)
6219 put_ol_stateid_locked(s, &reaplist);
6220 spin_unlock(&clp->cl_lock);
6221 list_for_each_entry(stp, &reaplist, st_locks)
6222 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
6223 free_ol_stateid_reaplist(&reaplist);
6224 } else {
6225 spin_unlock(&clp->cl_lock);
6226 free_ol_stateid_reaplist(&reaplist);
6227 if (unhashed)
6228 move_to_close_lru(s, clp->net);
6229 }
6230 }
6231
6232 /*
6233 * nfs4_unlock_state() called after encode
6234 */
6235 __be32
6236 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6237 union nfsd4_op_u *u)
6238 {
6239 struct nfsd4_close *close = &u->close;
6240 __be32 status;
6241 struct nfs4_ol_stateid *stp;
6242 struct net *net = SVC_NET(rqstp);
6243 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6244
6245 dprintk("NFSD: nfsd4_close on file %pd\n",
6246 cstate->current_fh.fh_dentry);
6247
6248 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6249 &close->cl_stateid,
6250 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6251 &stp, nn);
6252 nfsd4_bump_seqid(cstate, status);
6253 if (status)
6254 goto out;
6255
6256 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6257
6258 /*
6259 * Technically we don't _really_ have to increment or copy it, since
6260 * it should just be gone after this operation and we clobber the
6261 * copied value below, but we continue to do so here just to ensure
6262 * that racing ops see that there was a state change.
6263 */
6264 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6265
6266 nfsd4_close_open_stateid(stp);
6267 mutex_unlock(&stp->st_mutex);
6268
6269 /* v4.1+ suggests that we send a special stateid in here, since the
6270 * clients should just ignore this anyway. Since this is not useful
6271 * for v4.0 clients either, we set it to the special close_stateid
6272 * universally.
6273 *
6274 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6275 */
6276 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6277
6278 /* put reference from nfs4_preprocess_seqid_op */
6279 nfs4_put_stid(&stp->st_stid);
6280 out:
6281 return status;
6282 }
6283
6284 __be32
6285 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6286 union nfsd4_op_u *u)
6287 {
6288 struct nfsd4_delegreturn *dr = &u->delegreturn;
6289 struct nfs4_delegation *dp;
6290 stateid_t *stateid = &dr->dr_stateid;
6291 struct nfs4_stid *s;
6292 __be32 status;
6293 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6294
6295 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6296 return status;
6297
6298 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6299 if (status)
6300 goto out;
6301 dp = delegstateid(s);
6302 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6303 if (status)
6304 goto put_stateid;
6305
6306 destroy_delegation(dp);
6307 put_stateid:
6308 nfs4_put_stid(&dp->dl_stid);
6309 out:
6310 return status;
6311 }
6312
6313 static inline u64
6314 end_offset(u64 start, u64 len)
6315 {
6316 u64 end;
6317
6318 end = start + len;
6319 return end >= start ? end : NFS4_MAX_UINT64;
6320 }
6321
6322 /* last octet in a range */
6323 static inline u64
6324 last_byte_offset(u64 start, u64 len)
6325 {
6326 u64 end;
6327
6328 WARN_ON_ONCE(!len);
6329 end = start + len;
6330 return end > start ? end - 1 : NFS4_MAX_UINT64;
6331 }
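/*
 * Example: a lock of length 3 at offset 10 covers bytes 10-12, so
 * last_byte_offset(10, 3) is 12. A length of NFS4_MAX_UINT64 means
 * "to EOF"; start + len then wraps around and both helpers clamp the
 * result to NFS4_MAX_UINT64.
 */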
6332
6333 /*
6334 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6335 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6336 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
6337 * locking, this prevents us from being completely protocol-compliant. The
6338 * real solution to this problem is to start using unsigned file offsets in
6339 * the VFS, but this is a very deep change!
6340 */
6341 static inline void
6342 nfs4_transform_lock_offset(struct file_lock *lock)
6343 {
6344 if (lock->fl_start < 0)
6345 lock->fl_start = OFFSET_MAX;
6346 if (lock->fl_end < 0)
6347 lock->fl_end = OFFSET_MAX;
6348 }
6349
6350 static fl_owner_t
6351 nfsd4_fl_get_owner(fl_owner_t owner)
6352 {
6353 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6354
6355 nfs4_get_stateowner(&lo->lo_owner);
6356 return owner;
6357 }
6358
6359 static void
6360 nfsd4_fl_put_owner(fl_owner_t owner)
6361 {
6362 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6363
6364 if (lo)
6365 nfs4_put_stateowner(&lo->lo_owner);
6366 }
6367
6368 static void
6369 nfsd4_lm_notify(struct file_lock *fl)
6370 {
6371 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
6372 struct net *net = lo->lo_owner.so_client->net;
6373 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6374 struct nfsd4_blocked_lock *nbl = container_of(fl,
6375 struct nfsd4_blocked_lock, nbl_lock);
6376 bool queue = false;
6377
6378 /* An empty list means that something else is going to be using it */
6379 spin_lock(&nn->blocked_locks_lock);
6380 if (!list_empty(&nbl->nbl_list)) {
6381 list_del_init(&nbl->nbl_list);
6382 list_del_init(&nbl->nbl_lru);
6383 queue = true;
6384 }
6385 spin_unlock(&nn->blocked_locks_lock);
6386
6387 if (queue)
6388 nfsd4_run_cb(&nbl->nbl_cb);
6389 }
6390
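/*
 * lm_notify fires when a blocked POSIX lock becomes grantable; queueing
 * nbl_cb sends the client a CB_NOTIFY_LOCK callback so it can retry the
 * lock. The get/put hooks keep the lockowner pinned while the locking
 * code holds a reference to it.
 */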
6391 static const struct lock_manager_operations nfsd_posix_mng_ops = {
6392 .lm_notify = nfsd4_lm_notify,
6393 .lm_get_owner = nfsd4_fl_get_owner,
6394 .lm_put_owner = nfsd4_fl_put_owner,
6395 };
6396
6397 static inline void
6398 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6399 {
6400 struct nfs4_lockowner *lo;
6401
6402 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6403 lo = (struct nfs4_lockowner *) fl->fl_owner;
6404 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6405 GFP_KERNEL);
6406 if (!deny->ld_owner.data)
6407 /* We just don't care that much */
6408 goto nevermind;
6409 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6410 } else {
6411 nevermind:
6412 deny->ld_owner.len = 0;
6413 deny->ld_owner.data = NULL;
6414 deny->ld_clientid.cl_boot = 0;
6415 deny->ld_clientid.cl_id = 0;
6416 }
6417 deny->ld_start = fl->fl_start;
6418 deny->ld_length = NFS4_MAX_UINT64;
6419 if (fl->fl_end != NFS4_MAX_UINT64)
6420 deny->ld_length = fl->fl_end - fl->fl_start + 1;
6421 deny->ld_type = NFS4_READ_LT;
6422 if (fl->fl_type != F_RDLCK)
6423 deny->ld_type = NFS4_WRITE_LT;
6424 }
6425
6426 static struct nfs4_lockowner *
6427 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6428 {
6429 unsigned int strhashval = ownerstr_hashval(owner);
6430 struct nfs4_stateowner *so;
6431
6432 lockdep_assert_held(&clp->cl_lock);
6433
6434 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6435 so_strhash) {
6436 if (so->so_is_open_owner)
6437 continue;
6438 if (same_owner_str(so, owner))
6439 return lockowner(nfs4_get_stateowner(so));
6440 }
6441 return NULL;
6442 }
6443
6444 static struct nfs4_lockowner *
6445 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6446 {
6447 struct nfs4_lockowner *lo;
6448
6449 spin_lock(&clp->cl_lock);
6450 lo = find_lockowner_str_locked(clp, owner);
6451 spin_unlock(&clp->cl_lock);
6452 return lo;
6453 }
6454
6455 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6456 {
6457 unhash_lockowner_locked(lockowner(sop));
6458 }
6459
6460 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6461 {
6462 struct nfs4_lockowner *lo = lockowner(sop);
6463
6464 kmem_cache_free(lockowner_slab, lo);
6465 }
6466
6467 static const struct nfs4_stateowner_operations lockowner_ops = {
6468 .so_unhash = nfs4_unhash_lockowner,
6469 .so_free = nfs4_free_lockowner,
6470 };
6471
6472 /*
6473 * Alloc a lock owner structure.
6474 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
6475 * occurred.
6476 *
6477 * strhashval = ownerstr_hashval
6478 */
6479 static struct nfs4_lockowner *
6480 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6481 struct nfs4_ol_stateid *open_stp,
6482 struct nfsd4_lock *lock)
6483 {
6484 struct nfs4_lockowner *lo, *ret;
6485
6486 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6487 if (!lo)
6488 return NULL;
6489 INIT_LIST_HEAD(&lo->lo_blocked);
6490 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6491 lo->lo_owner.so_is_open_owner = 0;
6492 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6493 lo->lo_owner.so_ops = &lockowner_ops;
6494 spin_lock(&clp->cl_lock);
6495 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6496 if (ret == NULL) {
6497 list_add(&lo->lo_owner.so_strhash,
6498 &clp->cl_ownerstr_hashtbl[strhashval]);
6499 ret = lo;
6500 } else
6501 nfs4_free_stateowner(&lo->lo_owner);
6502
6503 spin_unlock(&clp->cl_lock);
6504 return ret;
6505 }
6506
6507 static struct nfs4_ol_stateid *
6508 find_lock_stateid(const struct nfs4_lockowner *lo,
6509 const struct nfs4_ol_stateid *ost)
6510 {
6511 struct nfs4_ol_stateid *lst;
6512
6513 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6514
6515 /* If ost is not hashed, ost->st_locks will not be valid */
6516 if (!nfs4_ol_stateid_unhashed(ost))
6517 list_for_each_entry(lst, &ost->st_locks, st_locks) {
6518 if (lst->st_stateowner == &lo->lo_owner) {
6519 refcount_inc(&lst->st_stid.sc_count);
6520 return lst;
6521 }
6522 }
6523 return NULL;
6524 }
6525
6526 static struct nfs4_ol_stateid *
6527 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6528 struct nfs4_file *fp, struct inode *inode,
6529 struct nfs4_ol_stateid *open_stp)
6530 {
6531 struct nfs4_client *clp = lo->lo_owner.so_client;
6532 struct nfs4_ol_stateid *retstp;
6533
6534 mutex_init(&stp->st_mutex);
6535 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6536 retry:
6537 spin_lock(&clp->cl_lock);
6538 if (nfs4_ol_stateid_unhashed(open_stp))
6539 goto out_close;
6540 retstp = find_lock_stateid(lo, open_stp);
6541 if (retstp)
6542 goto out_found;
6543 refcount_inc(&stp->st_stid.sc_count);
6544 stp->st_stid.sc_type = NFS4_LOCK_STID;
6545 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6546 get_nfs4_file(fp);
6547 stp->st_stid.sc_file = fp;
6548 stp->st_access_bmap = 0;
6549 stp->st_deny_bmap = open_stp->st_deny_bmap;
6550 stp->st_openstp = open_stp;
6551 spin_lock(&fp->fi_lock);
6552 list_add(&stp->st_locks, &open_stp->st_locks);
6553 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6554 list_add(&stp->st_perfile, &fp->fi_stateids);
6555 spin_unlock(&fp->fi_lock);
6556 spin_unlock(&clp->cl_lock);
6557 return stp;
6558 out_found:
6559 spin_unlock(&clp->cl_lock);
6560 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6561 nfs4_put_stid(&retstp->st_stid);
6562 goto retry;
6563 }
6564 /* To keep mutex tracking happy */
6565 mutex_unlock(&stp->st_mutex);
6566 return retstp;
6567 out_close:
6568 spin_unlock(&clp->cl_lock);
6569 mutex_unlock(&stp->st_mutex);
6570 return NULL;
6571 }
6572
6573 static struct nfs4_ol_stateid *
6574 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6575 struct inode *inode, struct nfs4_ol_stateid *ost,
6576 bool *new)
6577 {
6578 struct nfs4_stid *ns = NULL;
6579 struct nfs4_ol_stateid *lst;
6580 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6581 struct nfs4_client *clp = oo->oo_owner.so_client;
6582
6583 *new = false;
6584 spin_lock(&clp->cl_lock);
6585 lst = find_lock_stateid(lo, ost);
6586 spin_unlock(&clp->cl_lock);
6587 if (lst != NULL) {
6588 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6589 goto out;
6590 nfs4_put_stid(&lst->st_stid);
6591 }
6592 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6593 if (ns == NULL)
6594 return NULL;
6595
6596 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6597 if (lst == openlockstateid(ns))
6598 *new = true;
6599 else
6600 nfs4_put_stid(ns);
6601 out:
6602 return lst;
6603 }
6604
6605 static int
6606 check_lock_length(u64 offset, u64 length)
6607 {
6608 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6609 (length > ~offset)));
6610 }
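/*
 * Nonzero means the range is invalid: zero-length locks are never
 * allowed, and unless length is the special "to EOF" value
 * NFS4_MAX_UINT64, offset + length must not wrap the 64-bit range
 * (length > ~offset is exactly that overflow test).
 */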
6611
6612 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6613 {
6614 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6615
6616 lockdep_assert_held(&fp->fi_lock);
6617
6618 if (test_access(access, lock_stp))
6619 return;
6620 __nfs4_file_get_access(fp, access);
6621 set_access(access, lock_stp);
6622 }
6623
6624 static __be32
6625 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6626 struct nfs4_ol_stateid *ost,
6627 struct nfsd4_lock *lock,
6628 struct nfs4_ol_stateid **plst, bool *new)
6629 {
6630 __be32 status;
6631 struct nfs4_file *fi = ost->st_stid.sc_file;
6632 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6633 struct nfs4_client *cl = oo->oo_owner.so_client;
6634 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6635 struct nfs4_lockowner *lo;
6636 struct nfs4_ol_stateid *lst;
6637 unsigned int strhashval;
6638
6639 lo = find_lockowner_str(cl, &lock->lk_new_owner);
6640 if (!lo) {
6641 strhashval = ownerstr_hashval(&lock->lk_new_owner);
6642 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6643 if (lo == NULL)
6644 return nfserr_jukebox;
6645 } else {
6646 /* with an existing lockowner, seqids must be the same */
6647 status = nfserr_bad_seqid;
6648 if (!cstate->minorversion &&
6649 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6650 goto out;
6651 }
6652
6653 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6654 if (lst == NULL) {
6655 status = nfserr_jukebox;
6656 goto out;
6657 }
6658
6659 status = nfs_ok;
6660 *plst = lst;
6661 out:
6662 nfs4_put_stateowner(&lo->lo_owner);
6663 return status;
6664 }
6665
6666 /*
6667 * LOCK operation
6668 */
6669 __be32
6670 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6671 union nfsd4_op_u *u)
6672 {
6673 struct nfsd4_lock *lock = &u->lock;
6674 struct nfs4_openowner *open_sop = NULL;
6675 struct nfs4_lockowner *lock_sop = NULL;
6676 struct nfs4_ol_stateid *lock_stp = NULL;
6677 struct nfs4_ol_stateid *open_stp = NULL;
6678 struct nfs4_file *fp;
6679 struct nfsd_file *nf = NULL;
6680 struct nfsd4_blocked_lock *nbl = NULL;
6681 struct file_lock *file_lock = NULL;
6682 struct file_lock *conflock = NULL;
6683 __be32 status = 0;
6684 int lkflg;
6685 int err;
6686 bool new = false;
6687 unsigned char fl_type;
6688 unsigned int fl_flags = FL_POSIX;
6689 struct net *net = SVC_NET(rqstp);
6690 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6691
6692 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6693 (long long) lock->lk_offset,
6694 (long long) lock->lk_length);
6695
6696 if (check_lock_length(lock->lk_offset, lock->lk_length))
6697 return nfserr_inval;
6698
6699 if ((status = fh_verify(rqstp, &cstate->current_fh,
6700 S_IFREG, NFSD_MAY_LOCK))) {
6701 dprintk("NFSD: nfsd4_lock: permission denied!\n");
6702 return status;
6703 }
6704
6705 if (lock->lk_is_new) {
6706 if (nfsd4_has_session(cstate))
6707 /* See rfc 5661 18.10.3: given clientid is ignored: */
6708 memcpy(&lock->lk_new_clientid,
6709 &cstate->session->se_client->cl_clientid,
6710 sizeof(clientid_t));
6711
6712 status = nfserr_stale_clientid;
6713 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
6714 goto out;
6715
6716 /* validate and update open stateid and open seqid */
6717 status = nfs4_preprocess_confirmed_seqid_op(cstate,
6718 lock->lk_new_open_seqid,
6719 &lock->lk_new_open_stateid,
6720 &open_stp, nn);
6721 if (status)
6722 goto out;
6723 mutex_unlock(&open_stp->st_mutex);
6724 open_sop = openowner(open_stp->st_stateowner);
6725 status = nfserr_bad_stateid;
6726 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6727 &lock->lk_new_clientid))
6728 goto out;
6729 status = lookup_or_create_lock_state(cstate, open_stp, lock,
6730 &lock_stp, &new);
6731 } else {
6732 status = nfs4_preprocess_seqid_op(cstate,
6733 lock->lk_old_lock_seqid,
6734 &lock->lk_old_lock_stateid,
6735 NFS4_LOCK_STID, &lock_stp, nn);
6736 }
6737 if (status)
6738 goto out;
6739 lock_sop = lockowner(lock_stp->st_stateowner);
6740
6741 lkflg = setlkflg(lock->lk_type);
6742 status = nfs4_check_openmode(lock_stp, lkflg);
6743 if (status)
6744 goto out;
6745
6746 status = nfserr_grace;
6747 if (locks_in_grace(net) && !lock->lk_reclaim)
6748 goto out;
6749 status = nfserr_no_grace;
6750 if (!locks_in_grace(net) && lock->lk_reclaim)
6751 goto out;
6752
6753 fp = lock_stp->st_stid.sc_file;
6754 switch (lock->lk_type) {
6755 case NFS4_READW_LT:
6756 if (nfsd4_has_session(cstate))
6757 fl_flags |= FL_SLEEP;
6758 fallthrough;
6759 case NFS4_READ_LT:
6760 spin_lock(&fp->fi_lock);
6761 nf = find_readable_file_locked(fp);
6762 if (nf)
6763 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6764 spin_unlock(&fp->fi_lock);
6765 fl_type = F_RDLCK;
6766 break;
6767 case NFS4_WRITEW_LT:
6768 if (nfsd4_has_session(cstate))
6769 fl_flags |= FL_SLEEP;
6770 fallthrough;
6771 case NFS4_WRITE_LT:
6772 spin_lock(&fp->fi_lock);
6773 nf = find_writeable_file_locked(fp);
6774 if (nf)
6775 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6776 spin_unlock(&fp->fi_lock);
6777 fl_type = F_WRLCK;
6778 break;
6779 default:
6780 status = nfserr_inval;
6781 goto out;
6782 }
6783
6784 if (!nf) {
6785 status = nfserr_openmode;
6786 goto out;
6787 }
6788
6789 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6790 if (!nbl) {
6791 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6792 status = nfserr_jukebox;
6793 goto out;
6794 }
6795
6796 file_lock = &nbl->nbl_lock;
6797 file_lock->fl_type = fl_type;
6798 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6799 file_lock->fl_pid = current->tgid;
6800 file_lock->fl_file = nf->nf_file;
6801 file_lock->fl_flags = fl_flags;
6802 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6803 file_lock->fl_start = lock->lk_offset;
6804 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6805 nfs4_transform_lock_offset(file_lock);
6806
6807 conflock = locks_alloc_lock();
6808 if (!conflock) {
6809 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6810 status = nfserr_jukebox;
6811 goto out;
6812 }
6813
6814 if (fl_flags & FL_SLEEP) {
6815 nbl->nbl_time = ktime_get_boottime_seconds();
6816 spin_lock(&nn->blocked_locks_lock);
6817 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6818 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6819 spin_unlock(&nn->blocked_locks_lock);
6820 }
6821
6822 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6823 switch (err) {
6824 case 0: /* success! */
6825 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6826 status = 0;
6827 if (lock->lk_reclaim)
6828 nn->somebody_reclaimed = true;
6829 break;
6830 case FILE_LOCK_DEFERRED:
6831 nbl = NULL;
6832 fallthrough;
6833 case -EAGAIN: /* conflock holds conflicting lock */
6834 status = nfserr_denied;
6835 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6836 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6837 break;
6838 case -EDEADLK:
6839 status = nfserr_deadlock;
6840 break;
6841 default:
6842 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6843 status = nfserrno(err);
6844 break;
6845 }
6846 out:
6847 if (nbl) {
6848 /* dequeue it if we queued it before */
6849 if (fl_flags & FL_SLEEP) {
6850 spin_lock(&nn->blocked_locks_lock);
6851 list_del_init(&nbl->nbl_list);
6852 list_del_init(&nbl->nbl_lru);
6853 spin_unlock(&nn->blocked_locks_lock);
6854 }
6855 free_blocked_lock(nbl);
6856 }
6857 if (nf)
6858 nfsd_file_put(nf);
6859 if (lock_stp) {
6860 /* Bump seqid manually if the 4.0 replay owner is openowner */
6861 if (cstate->replay_owner &&
6862 cstate->replay_owner != &lock_sop->lo_owner &&
6863 seqid_mutating_err(ntohl(status)))
6864 lock_sop->lo_owner.so_seqid++;
6865
6866 /*
6867 * If this is a new, never-before-used stateid, and we are
6868 * returning an error, then just go ahead and release it.
6869 */
6870 if (status && new)
6871 release_lock_stateid(lock_stp);
6872
6873 mutex_unlock(&lock_stp->st_mutex);
6874
6875 nfs4_put_stid(&lock_stp->st_stid);
6876 }
6877 if (open_stp)
6878 nfs4_put_stid(&open_stp->st_stid);
6879 nfsd4_bump_seqid(cstate, status);
6880 if (conflock)
6881 locks_free_lock(conflock);
6882 return status;
6883 }
6884
6885 /*
6886 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6887 * so we do a temporary open here just to get an open file to pass to
6888 * vfs_test_lock.
6889 */
6890 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6891 {
6892 struct nfsd_file *nf;
6893 __be32 err;
6894
6895 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
6896 if (err)
6897 return err;
6898 fh_lock(fhp); /* to block new leases till after test_lock: */
6899 err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
6900 NFSD_MAY_READ));
6901 if (err)
6902 goto out;
6903 lock->fl_file = nf->nf_file;
6904 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
6905 lock->fl_file = NULL;
6906 out:
6907 fh_unlock(fhp);
6908 nfsd_file_put(nf);
6909 return err;
6910 }
6911
6912 /*
6913 * LOCKT operation
6914 */
6915 __be32
6916 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6917 union nfsd4_op_u *u)
6918 {
6919 struct nfsd4_lockt *lockt = &u->lockt;
6920 struct file_lock *file_lock = NULL;
6921 struct nfs4_lockowner *lo = NULL;
6922 __be32 status;
6923 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6924
6925 if (locks_in_grace(SVC_NET(rqstp)))
6926 return nfserr_grace;
6927
6928 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6929 return nfserr_inval;
6930
6931 if (!nfsd4_has_session(cstate)) {
6932 status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
6933 false);
6934 if (status)
6935 goto out;
6936 }
6937
6938 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6939 	if (status)
		goto out;
6940
6941 file_lock = locks_alloc_lock();
6942 if (!file_lock) {
6943 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6944 status = nfserr_jukebox;
6945 goto out;
6946 }
6947
6948 switch (lockt->lt_type) {
6949 case NFS4_READ_LT:
6950 case NFS4_READW_LT:
6951 file_lock->fl_type = F_RDLCK;
6952 break;
6953 case NFS4_WRITE_LT:
6954 case NFS4_WRITEW_LT:
6955 file_lock->fl_type = F_WRLCK;
6956 break;
6957 default:
6958 		dprintk("NFSD: nfsd4_lockt: bad lock type!\n");
6959 status = nfserr_inval;
6960 goto out;
6961 }
6962
6963 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6964 if (lo)
6965 file_lock->fl_owner = (fl_owner_t)lo;
6966 file_lock->fl_pid = current->tgid;
6967 file_lock->fl_flags = FL_POSIX;
6968
6969 file_lock->fl_start = lockt->lt_offset;
6970 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6971
6972 nfs4_transform_lock_offset(file_lock);
6973
6974 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6975 if (status)
6976 goto out;
6977
6978 if (file_lock->fl_type != F_UNLCK) {
6979 status = nfserr_denied;
6980 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6981 }
6982 out:
6983 if (lo)
6984 nfs4_put_stateowner(&lo->lo_owner);
6985 if (file_lock)
6986 locks_free_lock(file_lock);
6987 return status;
6988 }
6989
6990 __be32
6991 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6992 union nfsd4_op_u *u)
6993 {
6994 struct nfsd4_locku *locku = &u->locku;
6995 struct nfs4_ol_stateid *stp;
6996 struct nfsd_file *nf = NULL;
6997 struct file_lock *file_lock = NULL;
6998 __be32 status;
6999 int err;
7000 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7001
7002 	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
7003 (long long) locku->lu_offset,
7004 (long long) locku->lu_length);
7005
7006 if (check_lock_length(locku->lu_offset, locku->lu_length))
7007 return nfserr_inval;
7008
7009 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7010 &locku->lu_stateid, NFS4_LOCK_STID,
7011 &stp, nn);
7012 if (status)
7013 goto out;
7014 nf = find_any_file(stp->st_stid.sc_file);
7015 if (!nf) {
7016 status = nfserr_lock_range;
7017 goto put_stateid;
7018 }
7019 file_lock = locks_alloc_lock();
7020 if (!file_lock) {
7021 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7022 status = nfserr_jukebox;
7023 goto put_file;
7024 }
7025
7026 file_lock->fl_type = F_UNLCK;
7027 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7028 file_lock->fl_pid = current->tgid;
7029 file_lock->fl_file = nf->nf_file;
7030 file_lock->fl_flags = FL_POSIX;
7031 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7032 file_lock->fl_start = locku->lu_offset;
7033
7034 file_lock->fl_end = last_byte_offset(locku->lu_offset,
7035 locku->lu_length);
7036 nfs4_transform_lock_offset(file_lock);
7037
7038 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7039 if (err) {
7040 		dprintk("NFSD: nfsd4_locku: vfs_lock_file() failed!\n");
7041 goto out_nfserr;
7042 }
7043 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7044 put_file:
7045 nfsd_file_put(nf);
7046 put_stateid:
7047 mutex_unlock(&stp->st_mutex);
7048 nfs4_put_stid(&stp->st_stid);
7049 out:
7050 nfsd4_bump_seqid(cstate, status);
7051 if (file_lock)
7052 locks_free_lock(file_lock);
7053 return status;
7054
7055 out_nfserr:
7056 status = nfserrno(err);
7057 goto put_file;
7058 }
7059
7060 /*
7061 * returns
7062 * true: locks held by lockowner
7063 * false: no locks held by lockowner
7064 */
7065 static bool
7066 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7067 {
7068 struct file_lock *fl;
7069 	bool status = false;
7070 struct nfsd_file *nf = find_any_file(fp);
7071 struct inode *inode;
7072 struct file_lock_context *flctx;
7073
7074 if (!nf) {
7075 /* Any valid lock stateid should have some sort of access */
7076 WARN_ON_ONCE(1);
7077 return status;
7078 }
7079
7080 inode = locks_inode(nf->nf_file);
7081 flctx = inode->i_flctx;
7082
7083 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7084 spin_lock(&flctx->flc_lock);
7085 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7086 if (fl->fl_owner == (fl_owner_t)lowner) {
7087 status = true;
7088 break;
7089 }
7090 }
7091 spin_unlock(&flctx->flc_lock);
7092 }
7093 nfsd_file_put(nf);
7094 return status;
7095 }
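/*
 * An illustrative, hypothetical caller of check_for_locks();
 * lockowner_has_locks() is not defined in this file:
 *
 *	static bool lockowner_has_locks(struct nfs4_lockowner *lo)
 *	{
 *		struct nfs4_ol_stateid *stp;
 *
 *		list_for_each_entry(stp, &lo->lo_owner.so_stateids,
 *				    st_perstateowner)
 *			if (check_for_locks(stp->st_stid.sc_file, lo))
 *				return true;
 *		return false;
 *	}
 *
 * nfsd4_release_lockowner() below instead treats so_count != 1 as
 * "locks or other references still held", which avoids walking every
 * file's lock list.
 */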
7096
7097 __be32
7098 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7099 struct nfsd4_compound_state *cstate,
7100 union nfsd4_op_u *u)
7101 {
7102 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7103 clientid_t *clid = &rlockowner->rl_clientid;
7104 struct nfs4_stateowner *sop;
7105 struct nfs4_lockowner *lo = NULL;
7106 struct nfs4_ol_stateid *stp;
7107 struct xdr_netobj *owner = &rlockowner->rl_owner;
7108 unsigned int hashval = ownerstr_hashval(owner);
7109 __be32 status;
7110 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7111 struct nfs4_client *clp;
7112 	LIST_HEAD(reaplist);
7113
7114 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7115 clid->cl_boot, clid->cl_id);
7116
7117 status = lookup_clientid(clid, cstate, nn, false);
7118 if (status)
7119 return status;
7120
7121 clp = cstate->clp;
7122 /* Find the matching lock stateowner */
7123 spin_lock(&clp->cl_lock);
7124 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
7125 so_strhash) {
7126
7127 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
7128 continue;
7129
7130 if (atomic_read(&sop->so_count) != 1) {
7131 spin_unlock(&clp->cl_lock);
7132 return nfserr_locks_held;
7133 }
7134
7135 lo = lockowner(sop);
7136 nfs4_get_stateowner(sop);
7137 break;
7138 }
7139 if (!lo) {
7140 spin_unlock(&clp->cl_lock);
7141 return status;
7142 }
7143
7144 unhash_lockowner_locked(lo);
7145 while (!list_empty(&lo->lo_owner.so_stateids)) {
7146 stp = list_first_entry(&lo->lo_owner.so_stateids,
7147 struct nfs4_ol_stateid,
7148 st_perstateowner);
7149 WARN_ON(!unhash_lock_stateid(stp));
7150 put_ol_stateid_locked(stp, &reaplist);
7151 }
7152 spin_unlock(&clp->cl_lock);
7153 free_ol_stateid_reaplist(&reaplist);
7154 remove_blocked_locks(lo);
7155 nfs4_put_stateowner(&lo->lo_owner);
7156
7157 return status;
7158 }
7159
7160 static inline struct nfs4_client_reclaim *
7161 alloc_reclaim(void)
7162 {
7163 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7164 }
7165
7166 bool
7167 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7168 {
7169 struct nfs4_client_reclaim *crp;
7170
7171 crp = nfsd4_find_reclaim_client(name, nn);
7172 return (crp && crp->cr_clp);
7173 }
7174
7175 /*
7176  * On failure all bets are off for reclaim; affected clients will see nfserr_no_grace.
7177 *
7178 * The caller is responsible for freeing name.data if NULL is returned (it
7179 * will be freed in nfs4_remove_reclaim_record in the normal case).
7180 */
7181 struct nfs4_client_reclaim *
7182 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7183 struct nfsd_net *nn)
7184 {
7185 unsigned int strhashval;
7186 struct nfs4_client_reclaim *crp;
7187
7188 crp = alloc_reclaim();
7189 if (crp) {
7190 strhashval = clientstr_hashval(name);
7191 INIT_LIST_HEAD(&crp->cr_strhash);
7192 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7193 crp->cr_name.data = name.data;
7194 crp->cr_name.len = name.len;
7195 crp->cr_princhash.data = princhash.data;
7196 crp->cr_princhash.len = princhash.len;
7197 crp->cr_clp = NULL;
7198 nn->reclaim_str_hashtbl_size++;
7199 }
7200 return crp;
7201 }
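/*
 * A minimal caller sketch for the ownership rule described above (the
 * error handling shown is illustrative only):
 *
 *	crp = nfs4_client_to_reclaim(name, princhash, nn);
 *	if (!crp) {
 *		kfree(name.data);	(still owned by the caller)
 *		return -ENOMEM;
 *	}
 *
 * On success, name.data belongs to the reclaim table and is freed later
 * by nfs4_remove_reclaim_record().
 */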
7202
7203 void
7204 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7205 {
7206 list_del(&crp->cr_strhash);
7207 kfree(crp->cr_name.data);
7208 kfree(crp->cr_princhash.data);
7209 kfree(crp);
7210 nn->reclaim_str_hashtbl_size--;
7211 }
7212
7213 void
7214 nfs4_release_reclaim(struct nfsd_net *nn)
7215 {
7216 struct nfs4_client_reclaim *crp = NULL;
7217 int i;
7218
7219 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7220 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7221 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7222 struct nfs4_client_reclaim, cr_strhash);
7223 nfs4_remove_reclaim_record(crp, nn);
7224 }
7225 }
7226 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7227 }
7228
7229 /* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
7231 struct nfs4_client_reclaim *
7232 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7233 {
7234 unsigned int strhashval;
7235 struct nfs4_client_reclaim *crp = NULL;
7236
7237 strhashval = clientstr_hashval(name);
7238 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7239 if (compare_blob(&crp->cr_name, &name) == 0) {
7240 return crp;
7241 }
7242 }
7243 return NULL;
7244 }
7245
7246 /*
7247 * Called from OPEN. Look for clientid in reclaim list.
7248 */
7249 __be32
7250 nfs4_check_open_reclaim(clientid_t *clid,
7251 struct nfsd4_compound_state *cstate,
7252 struct nfsd_net *nn)
7253 {
7254 __be32 status;
7255
7256 /* find clientid in conf_id_hashtbl */
7257 status = lookup_clientid(clid, cstate, nn, false);
7258 if (status)
7259 return nfserr_reclaim_bad;
7260
7261 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
7262 return nfserr_no_grace;
7263
7264 if (nfsd4_client_record_check(cstate->clp))
7265 return nfserr_reclaim_bad;
7266
7267 return nfs_ok;
7268 }
7269
7270 /*
7271 * Since the lifetime of a delegation isn't limited to that of an open, a
7272 * client may quite reasonably hang on to a delegation as long as it has
7273 * the inode cached. This becomes an obvious problem the first time a
7274 * client's inode cache approaches the size of the server's total memory.
7275 *
7276 * For now we avoid this problem by imposing a hard limit on the number
7277 * of delegations, which varies according to the server's memory size.
7278 */
7279 static void
7280 set_max_delegations(void)
7281 {
7282 /*
7283 * Allow at most 4 delegations per megabyte of RAM. Quick
7284 * estimates suggest that in the worst case (where every delegation
7285 * is for a different inode), a delegation could take about 1.5K,
7286 	 * giving a worst-case usage of about 0.6% of memory.
7287 */
7288 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7289 }
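/*
 * Worked example of the shift above, assuming 4K pages (PAGE_SHIFT ==
 * 12): the shift amount is 20 - 2 - 12 = 6, so max_delegations =
 * pages / 64. One megabyte holds 256 such pages, and 256 / 64 = 4
 * delegations per MB, matching the comment; at ~1.5K each that is
 * ~6K per MB, i.e. the ~0.6% worst case.
 */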
7290
7291 static int nfs4_state_create_net(struct net *net)
7292 {
7293 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7294 int i;
7295
7296 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7297 sizeof(struct list_head),
7298 GFP_KERNEL);
7299 if (!nn->conf_id_hashtbl)
7300 goto err;
7301 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7302 sizeof(struct list_head),
7303 GFP_KERNEL);
7304 if (!nn->unconf_id_hashtbl)
7305 goto err_unconf_id;
7306 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7307 sizeof(struct list_head),
7308 GFP_KERNEL);
7309 if (!nn->sessionid_hashtbl)
7310 goto err_sessionid;
7311
7312 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7313 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7314 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7315 }
7316 for (i = 0; i < SESSION_HASH_SIZE; i++)
7317 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7318 nn->conf_name_tree = RB_ROOT;
7319 nn->unconf_name_tree = RB_ROOT;
7320 nn->boot_time = ktime_get_real_seconds();
7321 nn->grace_ended = false;
7322 nn->nfsd4_manager.block_opens = true;
7323 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7324 INIT_LIST_HEAD(&nn->client_lru);
7325 INIT_LIST_HEAD(&nn->close_lru);
7326 INIT_LIST_HEAD(&nn->del_recall_lru);
7327 spin_lock_init(&nn->client_lock);
7328 spin_lock_init(&nn->s2s_cp_lock);
7329 idr_init(&nn->s2s_cp_stateids);
7330
7331 spin_lock_init(&nn->blocked_locks_lock);
7332 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7333
7334 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7335 get_net(net);
7336
7337 return 0;
7338
7339 err_sessionid:
7340 kfree(nn->unconf_id_hashtbl);
7341 err_unconf_id:
7342 kfree(nn->conf_id_hashtbl);
7343 err:
7344 return -ENOMEM;
7345 }
7346
7347 static void
7348 nfs4_state_destroy_net(struct net *net)
7349 {
7350 int i;
7351 struct nfs4_client *clp = NULL;
7352 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7353
7354 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7355 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7356 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7357 destroy_client(clp);
7358 }
7359 }
7360
7361 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7362
7363 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7364 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7365 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7366 destroy_client(clp);
7367 }
7368 }
7369
7370 kfree(nn->sessionid_hashtbl);
7371 kfree(nn->unconf_id_hashtbl);
7372 kfree(nn->conf_id_hashtbl);
7373 put_net(net);
7374 }
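/*
 * nfs4_state_create_net() and nfs4_state_destroy_net() are a strict
 * pair: the get_net() taken at creation is dropped by the put_net()
 * above, and the tables are freed in reverse order of allocation. As a
 * purely hypothetical illustration (nfsd actually calls these from
 * nfs4_state_start_net()/nfs4_state_shutdown_net(), not via its own
 * pernet registration), the pairing has the shape of:
 *
 *	static struct pernet_operations nfs4_state_net_ops = {
 *		.init = nfs4_state_create_net,
 *		.exit = nfs4_state_destroy_net,
 *	};
 */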
7375
7376 int
7377 nfs4_state_start_net(struct net *net)
7378 {
7379 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7380 int ret;
7381
7382 ret = get_nfsdfs(net);
7383 if (ret)
7384 return ret;
7385 ret = nfs4_state_create_net(net);
7386 if (ret) {
7387 mntput(nn->nfsd_mnt);
7388 return ret;
7389 }
7390 locks_start_grace(net, &nn->nfsd4_manager);
7391 nfsd4_client_tracking_init(net);
7392 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7393 goto skip_grace;
7394 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
7395 nn->nfsd4_grace, net->ns.inum);
7396 trace_nfsd_grace_start(nn);
7397 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7398 return 0;
7399
7400 skip_grace:
7401 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7402 net->ns.inum);
7403 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7404 nfsd4_end_grace(nn);
7405 return 0;
7406 }
7407
7408 /* initialization to perform when the nfsd service is started: */
7409
7410 int
7411 nfs4_state_start(void)
7412 {
7413 int ret;
7414
7415 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7416 if (laundry_wq == NULL) {
7417 ret = -ENOMEM;
7418 goto out;
7419 }
7420 ret = nfsd4_create_callback_queue();
7421 if (ret)
7422 goto out_free_laundry;
7423
7424 set_max_delegations();
7425 return 0;
7426
7427 out_free_laundry:
7428 destroy_workqueue(laundry_wq);
7429 out:
7430 return ret;
7431 }
7432
7433 void
7434 nfs4_state_shutdown_net(struct net *net)
7435 {
7436 struct nfs4_delegation *dp = NULL;
7437 struct list_head *pos, *next, reaplist;
7438 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7439
7440 cancel_delayed_work_sync(&nn->laundromat_work);
7441 locks_end_grace(&nn->nfsd4_manager);
7442
7443 INIT_LIST_HEAD(&reaplist);
7444 spin_lock(&state_lock);
7445 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7446 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7447 WARN_ON(!unhash_delegation_locked(dp));
7448 list_add(&dp->dl_recall_lru, &reaplist);
7449 }
7450 spin_unlock(&state_lock);
7451 list_for_each_safe(pos, next, &reaplist) {
7452 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7453 list_del_init(&dp->dl_recall_lru);
7454 destroy_unhashed_deleg(dp);
7455 }
7456
7457 nfsd4_client_tracking_exit(net);
7458 nfs4_state_destroy_net(net);
7459 mntput(nn->nfsd_mnt);
7460 }
7461
7462 void
7463 nfs4_state_shutdown(void)
7464 {
7465 destroy_workqueue(laundry_wq);
7466 nfsd4_destroy_callback_queue();
7467 }
7468
7469 static void
7470 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7471 {
7472 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
7473 CURRENT_STATEID(stateid))
7474 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7475 }
7476
7477 static void
7478 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7479 {
7480 if (cstate->minorversion) {
7481 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7482 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7483 }
7484 }
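/*
 * How the helpers below fit together (RFC 5661's special "current
 * stateid": seqid 1, all-zero "other" field). Ops that produce a
 * stateid record it via put_stateid(); ops that consume one substitute
 * the saved value via get_stateid() when the client sent the special
 * stateid. In a compound such as
 *
 *	SEQUENCE ; PUTFH ; OPEN ; WRITE(stateid = current)
 *
 * the WRITE picks up OPEN's result without the client round-tripping
 * the stateid. NFSv4.0 is excluded: put_stateid() saves nothing unless
 * cstate->minorversion is nonzero.
 */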
7485
7486 void
7487 clear_current_stateid(struct nfsd4_compound_state *cstate)
7488 {
7489 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7490 }
7491
7492 /*
7493 * functions to set current state id
7494 */
7495 void
7496 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7497 union nfsd4_op_u *u)
7498 {
7499 put_stateid(cstate, &u->open_downgrade.od_stateid);
7500 }
7501
7502 void
7503 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7504 union nfsd4_op_u *u)
7505 {
7506 put_stateid(cstate, &u->open.op_stateid);
7507 }
7508
7509 void
7510 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7511 union nfsd4_op_u *u)
7512 {
7513 put_stateid(cstate, &u->close.cl_stateid);
7514 }
7515
7516 void
7517 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7518 union nfsd4_op_u *u)
7519 {
7520 put_stateid(cstate, &u->lock.lk_resp_stateid);
7521 }
7522
7523 /*
7524 * functions to consume current state id
7525 */
7526
7527 void
7528 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7529 union nfsd4_op_u *u)
7530 {
7531 get_stateid(cstate, &u->open_downgrade.od_stateid);
7532 }
7533
7534 void
7535 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7536 union nfsd4_op_u *u)
7537 {
7538 get_stateid(cstate, &u->delegreturn.dr_stateid);
7539 }
7540
7541 void
7542 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7543 union nfsd4_op_u *u)
7544 {
7545 get_stateid(cstate, &u->free_stateid.fr_stateid);
7546 }
7547
7548 void
7549 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7550 union nfsd4_op_u *u)
7551 {
7552 get_stateid(cstate, &u->setattr.sa_stateid);
7553 }
7554
7555 void
7556 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7557 union nfsd4_op_u *u)
7558 {
7559 get_stateid(cstate, &u->close.cl_stateid);
7560 }
7561
7562 void
7563 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7564 union nfsd4_op_u *u)
7565 {
7566 get_stateid(cstate, &u->locku.lu_stateid);
7567 }
7568
7569 void
7570 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7571 union nfsd4_op_u *u)
7572 {
7573 get_stateid(cstate, &u->read.rd_stateid);
7574 }
7575
7576 void
7577 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7578 union nfsd4_op_u *u)
7579 {
7580 get_stateid(cstate, &u->write.wr_stateid);
7581 }
7582