// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped, which indicates a node has failed
 * (perhaps the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */
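
/*
 * For illustration, the callers below (dlm_recover_masters, dlm_recover_locks)
 * follow this pattern: kick off asynchronous work per rsb, then block until
 * the matching "empty" predicate is true or recovery is stopped, clearing the
 * list on error:
 *
 *	error = dlm_wait_function(ls, &recover_idr_empty);
 *	if (error)
 *		recover_idr_clear(ls);
 */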

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
		if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
			log_debug(ls, "dlm_wait_function timed out");
			return -ETIMEDOUT;
		}
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */
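
/*
 * Worked example (assuming the DLM_RS_* flag layout in dlm_internal.h, where
 * each X_ALL flag is X shifted left by one, which wait_status() below relies
 * on): with members {1,2,3}, node 1 has the low nodeid.  Once nodes 2 and 3
 * have each set DLM_RS_DIR locally, node 1 sees it in rc_result via
 * dlm_rcom_status() and sets DLM_RS_DIR_ALL (DLM_RS_DIR << 1); nodes 2 and 3
 * keep polling node 1 until its rc_result includes DLM_RS_DIR_ALL.
 */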

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}

int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */
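
/*
 * For example, in the master lookups below: recover_idr_add() assigns
 * r->res_id and takes a hold on the rsb, dlm_send_rcom_lookup() carries that
 * id to the directory node (as rc_id, assuming the rcom code copies res_id
 * into the message), and dlm_recover_master_reply() matches the reply back to
 * the rsb with recover_idr_find(ls, rc->rc_id) before dropping it with
 * recover_idr_del().
 */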

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		rv = -1;
		goto out_unlock;
	}
	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
	if (rv < 0)
		goto out_unlock;

	r->res_id = rv;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	rv = 0;
out_unlock:
	spin_unlock(&ls->ls_recover_idr_lock);
	idr_preload_end();
	return rv;
}

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);
	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	(*count)++;
	return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved in
 * the requestqueue cannot know whether it should ignore the reply and resend
 * the request, or accept the reply and complete the request.  It must do the
 * former if the remote node purged MSTCPY locks, and the latter if it did not.
 * This is solved by always purging MSTCPY locks, in which case the request
 * reply is always ignored and the request resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_rinfo(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	ret_nodeid = rc->rc_result;

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/


/*
 * Keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies, recovery for the rsb is done.
 */
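
/*
 * Concretely (tracing only the functions in this file): recover_locks_queue()
 * increments res_recover_locks_count for each dlm_send_rcom_lock() it issues,
 * and each reply, presumably arriving via dlm_recover_process_copy() per the
 * diagram above, ends up in dlm_recovered_lock(), which decrements the count;
 * when it reaches zero the rsb drops off the recover_list and RSB_NEW_MASTER
 * is cleared.
 */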

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 * (We could probably improve this by only invalidating in this way when
 * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */
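
/*
 * For example, as a new master (NEW_MASTER2): if the rebuilt queues hold an
 * EX/PW lock that used DLM_LKF_VALBLK, the rsb lvb and lvbseq are copied from
 * it; if only NL/CR locks with lvbs remain, RSB_VALNOTVALID is set and the lvb
 * is copied from the NL/CR lkb with the highest lkb_lvbseq; if no lock on the
 * rsb used DLM_LKF_VALBLK, the rsb lvb is left untouched.
 */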

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */
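
/* For example, if the new master rebuilt a PR lock on the grant queue and a
   lock on the convert queue arrived with lkb_grmode DLM_LOCK_IV,
   recover_conversion() below sets that lkb's grmode to PR; if no PR or CW
   lock is granted, the grmode falls back to the lock's own rqmode. */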

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1) {
			log_debug(ls, "recover_conversion %x set gr to rq %d",
				  lkb->lkb_id, lkb->lkb_rqmode);
			lkb->lkb_grmode = lkb->lkb_rqmode;
		} else {
			log_debug(ls, "recover_conversion %x set gr %d",
				  lkb->lkb_id, grmode);
			lkb->lkb_grmode = grmode;
		}
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);

			/* recover lvb before granting locks so the updated
			   lvb/VALNOTVALID is presented in the completion */
			recover_lvb(r);

			if (rsb_flag(r, RSB_NEW_MASTER2))
				recover_grant(r);
			count++;
		} else {
			rsb_clear_flag(r, RSB_VALNOTVALID);
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	if (count)
		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
			log_error(ls, "dlm_create_root_list toss not empty");
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

void dlm_clear_toss(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	unsigned int count = 0;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(r);
			count++;
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}

	if (count)
		log_rinfo(ls, "dlm_clear_toss %u done", count);
}