// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
#define AFS_LOCK_YOUR_TRY	2
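
/*
 * Lifecycle of fl_u.afs.state as used below: a waiter starts at
 * AFS_LOCK_PENDING, moves to AFS_LOCK_GRANTED when its lock is granted,
 * is set to AFS_LOCK_YOUR_TRY when afs_next_locker() nominates it to
 * retry the server RPC itself, or takes a negative errno on failure.
 */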

struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

static atomic_t afs_file_lock_debug_id;

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;

	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);
	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}
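
/*
 * Worked example of the arithmetic above (assuming AFS_LOCKWAIT is the
 * 5-minute server lock lifetime, i.e. 300 seconds): the extension is
 * scheduled for locked_at + 150000ms, roughly halfway through the lock's
 * lifetime.  If that point has already passed, duration_j is 0 and the
 * work item runs immediately.
 */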

/*
 * In the case of successful completion of a lock operation, record the time
 * the reply appeared and start the lock extension timer.
 */
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_operation *op = call->op;
	struct afs_vnode *vnode = op->file[0].vnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->issue_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && p->fl_type == F_WRLCK)
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		wake_up(&p->fl_wait);
	}
}
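
/*
 * For example, with the server lock held as AFS_LOCK_READ and a pending
 * queue of [R1, R2, W1, R3], the loop above grants R1, R2 and R3 (readers
 * may jump the queue) and leaves W1 pending; with the server lock held as
 * AFS_LOCK_WRITE, every waiter is granted, writers included.
 */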

/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get.  If there are any
 * remaining lockers, try to wake up one of them to have a go.
 */
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int fl_type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		fl_type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->fl_type == fl_type &&
		    afs_file_key(p->fl_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			wake_up(&p->fl_wait);
		}

		/* Select the next locker to hand off to. */
		if (next &&
		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
			continue;
		next = p;
	}

	vnode->lock_key = NULL;
	key_put(key);

	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		wake_up(&next->fl_wait);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}
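
/*
 * Note the hand-off protocol: the chosen waiter is flagged with
 * AFS_LOCK_YOUR_TRY rather than being granted anything.  It wakes up in
 * afs_do_setlk(), resets itself to AFS_LOCK_PENDING and jumps back to
 * try_to_lock to issue the server RPC on its own behalf.
 */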

/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		wake_up(&p->fl_wait);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

static void afs_lock_success(struct afs_operation *op)
{
	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_set_lock_operation = {
	.issue_afs_rpc	= afs_fs_set_lock,
	.issue_yfs_rpc	= yfs_fs_set_lock,
	.success	= afs_lock_success,
	.aborted	= afs_check_for_remote_deletion,
};

/*
 * Get a lock on a file
 */
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->lock.type = type;
	op->ops = &afs_set_lock_operation;
	return afs_do_sync_operation(op);
}
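
/*
 * As with the other lock RPC wrappers below, afs_do_sync_operation()
 * drives the operation table above, calling ->issue_afs_rpc or
 * ->issue_yfs_rpc depending on which protocol variant the fileserver
 * speaks, and ->success or ->aborted on completion.
 */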

static const struct afs_operation_ops afs_extend_lock_operation = {
	.issue_afs_rpc	= afs_fs_extend_lock,
	.issue_yfs_rpc	= yfs_fs_extend_lock,
	.success	= afs_lock_success,
};

/*
 * Extend a lock on a file
 */
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_extend_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_release_lock_operation = {
	.issue_afs_rpc	= afs_fs_release_lock,
	.issue_yfs_rpc	= yfs_fs_release_lock,
	.success	= afs_lock_success,
};

/*
 * Release a lock on a file
 */
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_release_lock_operation;
	return afs_do_sync_operation(op);
}
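
/*
 * Extend and release are marked AFS_OPERATION_UNINTR: they run from the
 * lock manager work item rather than a user context, and letting a signal
 * interrupt the AF_RXRPC call would leave the server-side lock state
 * uncertain (see also afs_defer_unlock() below).
 */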

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll expire anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			pr_warn("AFS: Failed to release lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);

		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals.  The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}
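
/*
 * A sketch of the lock_state transitions driven in this file (reconstructed
 * from the code here, not an authoritative diagram):
 *
 *	NONE -> SETTING			(a waiter issues SetLock)
 *	SETTING -> GRANTED		(server granted the lock)
 *	SETTING -> WAITING_FOR_CB	(server returned -EWOULDBLOCK)
 *	WAITING_FOR_CB -> SETTING	(callback break or retry timer,
 *					 via afs_next_locker())
 *	GRANTED <-> EXTENDING		(periodic ExtendLock RPC)
 *	GRANTED/EXTENDING -> NEED_UNLOCK -> UNLOCKING -> NONE/SETTING
 *	any -> DELETED			(server returned -ENOENT)
 */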

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}
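
/*
 * In tabular form, the check above requires at least one of:
 *
 *	lock type	acceptable ACL bits
 *	AFS_LOCK_READ	AFS_ACE_LOCK, AFS_ACE_WRITE or AFS_ACE_INSERT
 *	AFS_LOCK_WRITE	AFS_ACE_WRITE or AFS_ACE_INSERT
 */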

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = locks_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->fl_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local.  Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock.  Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}

	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry.  The server will break the outstanding
		 * callbacks on a file when a lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->fl_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait.  Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);

	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock.  We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got.  We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}
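
/*
 * Note on flock_mode: assuming the superblock's flock_mode comes from the
 * "flock" mount parameter (local/openafs/strict/write), the default
 * openafs mode above keeps partial-file locks purely local, strict mode
 * always takes a server lock, and write mode promotes partial locks to
 * whole-file write locks on the server.
 */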

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->fl_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->fl_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}
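
/*
 * As used above and in afs_do_setlk(), the server's lock_count encodes
 * the lock state of the whole file: a positive value is the number of
 * shared read locks, -1 means write-locked and 0 means unlocked.
 */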

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}
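
/*
 * A minimal userspace sketch of what reaches afs_lock() via the ->lock
 * file operation (hypothetical fd on an AFS mount):
 *
 *	struct flock fk = {
 *		.l_type   = F_WRLCK,	// afs_do_setlk(), AFS_LOCK_WRITE
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,		// start 0, len 0 = full-file lock
 *		.l_len    = 0,
 *	};
 *	fcntl(fd, F_SETLKW, &fk);	// FL_SLEEP set: may wait in
 *					// afs_do_setlk()
 *	fcntl(fd, F_GETLK, &fk);	// afs_do_getlk()
 *	fk.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fk);	// afs_do_unlk()
 */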

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over AFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}