/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
#include <linux/sched.h>
#include "internal.h"

/*
 * Allow the fileserver to request callback state (re-)initialisation.
 * Unfortunately, UUIDs are not guaranteed unique.
 */
void afs_init_callback_state(struct afs_server *server)
{
	rcu_read_lock();
	do {
		server->cb_s_break++;
		server = rcu_dereference(server->uuid_next);
	} while (0);
	rcu_read_unlock();
}

/*
 * actually break a callback
 */
void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	_enter("");

	clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
		vnode->cb_break++;
		afs_clear_permits(vnode);

		if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
			afs_lock_may_be_available(vnode);

		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
	} else {
		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
	}
}

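/*
 * Break a callback promise, taking the vnode's cb_lock to serialise the
 * update of the callback break counter.
 */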
void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	write_seqlock(&vnode->cb_lock);
	__afs_break_callback(vnode, reason);
	write_sequnlock(&vnode->cb_lock);
}

/*
 * Look up a volume by volume ID under RCU conditions.
 */
static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
						afs_volid_t vid)
{
	struct afs_volume *volume = NULL;
	struct rb_node *p;
	int seq = 0;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&cell->volume_lock, &seq);

		p = rcu_dereference_raw(cell->volumes.rb_node);
		while (p) {
			volume = rb_entry(p, struct afs_volume, cell_node);

			if (volume->vid < vid)
				p = rcu_dereference_raw(p->rb_left);
			else if (volume->vid > vid)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			volume = NULL;
		}

	} while (need_seqretry(&cell->volume_lock, seq));

	done_seqretry(&cell->volume_lock, seq);
	return volume;
}

/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 */
static void afs_break_one_callback(struct afs_volume *volume,
				   struct afs_fid *fid)
{
	struct super_block *sb;
	struct afs_vnode *vnode;
	struct inode *inode;

	if (fid->vnode == 0 && fid->unique == 0) {
		/* The callback break applies to an entire volume. */
		write_lock(&volume->cb_v_break_lock);
		volume->cb_v_break++;
		trace_afs_cb_break(fid, volume->cb_v_break,
				   afs_cb_break_for_volume_callback, false);
		write_unlock(&volume->cb_v_break_lock);
		return;
	}

	/* See if we can find a matching inode - even an I_NEW inode needs to
	 * be marked as it can have its callback broken before we finish
	 * setting up the local inode.
	 */
	sb = rcu_dereference(volume->sb);
	if (!sb)
		return;

	inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
	if (inode) {
		vnode = AFS_FS_I(inode);
		afs_break_callback(vnode, afs_cb_break_for_callback);
	} else {
		trace_afs_cb_miss(fid, afs_cb_break_for_callback);
	}
}

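/*
 * Break all the callbacks in a batch that refer to the same volume as the
 * first entry, and compact the entries for other volumes to the front of the
 * array for the caller to process on a later pass.
 */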
static void afs_break_some_callbacks(struct afs_server *server,
				     struct afs_callback_break *cbb,
				     size_t *_count)
{
	struct afs_callback_break *residue = cbb;
	struct afs_volume *volume;
	afs_volid_t vid = cbb->fid.vid;
	size_t i;

	volume = afs_lookup_volume_rcu(server->cell, vid);

	/* TODO: Find all matching volumes if we couldn't match the server and
	 * break them anyway.
	 */

	for (i = *_count; i > 0; cbb++, i--) {
		if (cbb->fid.vid == vid) {
			_debug("- Fid { vl=%08llx n=%llu u=%u }",
			       cbb->fid.vid,
			       cbb->fid.vnode,
			       cbb->fid.unique);
			--*_count;
			if (volume)
				afs_break_one_callback(volume, &cbb->fid);
		} else {
			*residue++ = *cbb;
		}
	}
}

/*
 * allow the fileserver to break callback promises
 */
void afs_break_callbacks(struct afs_server *server, size_t count,
			 struct afs_callback_break *callbacks)
{
	_enter("%p,%zu,", server, count);

	ASSERT(server != NULL);

	rcu_read_lock();

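	/* Each pass consumes every entry that shares the first remaining
	 * entry's volume ID and shuffles the rest to the front of the array.
	 */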
	while (count > 0)
		afs_break_some_callbacks(server, callbacks, &count);

	rcu_read_unlock();
	return;
}