xref: /OK3568_Linux_fs/kernel/fs/gfs2/lock_dlm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/sched/signal.h>

#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "recovery.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

/**
 * gfs2_update_stats - Update time based stats
 * @s: Pointer to the statistics structure to update
 * @index: Index of the entry holding the smoothed mean
 * @sample: New data to include
 *
 * @delta is the difference between the current rtt sample and the
 * running average srtt. We add 1/8 of that to the srtt in order to
 * update the current srtt estimate. The variance estimate is a bit
 * more complicated. We subtract the current variance estimate from
 * the abs value of the @delta and add 1/4 of that to the running
 * total.  That's equivalent to 3/4 of the current variance
 * estimate plus 1/4 of the abs of @delta.
 *
 * Note that the index points at the array entry containing the smoothed
 * mean value, and the variance is always in the following entry.
 *
 * Reference: TCP/IP Illustrated, vol 2, pp. 831-832.
 * All times are in units of integer nanoseconds. Unlike the TCP/IP case,
 * they are not scaled fixed point.
 */

static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}
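
/*
 * Worked example of the update above (hypothetical values, for
 * illustration only): with srtt = 800ns and a new sample of 1000ns,
 * delta = 200, so srtt += 200 >> 3 = 25, giving 825ns.  If the
 * variance entry holds 100, it gains (abs(200) - 100) >> 2 = 25,
 * giving 125ns, i.e. 3/4 * 100 + 1/4 * abs(delta) = 125.
 */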

/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, where the
 * requested state is null (or unlocked), or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}

/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */

static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}

static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		if (gl->gl_ops->go_free)
			gl->gl_ops->go_free(gl);
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}
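
/*
 * For illustration (hypothetical request): converting an existing lock
 * (sb_lkid != 0) that carries an lvb, with LM_FLAG_TRY_1CB set and
 * GLF_BLOCKING clear, yields
 * lkf = DLM_LKF_VALBLK | DLM_LKF_NOQUEUE | DLM_LKF_NOQUEUEBAST |
 *       DLM_LKF_CONVERT.
 */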

static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}
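
/*
 * Example (hypothetical value): gfs2_reverse_hex(c, 0x1234) first
 * writes '0' at *c, then overwrites backwards from *c, leaving the
 * digits "1234" ending at c; a value of 0 leaves just the initial '0'.
 */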

static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(gl->gl_name.ln_sbd, req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}
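
/*
 * Resulting lock name layout (hypothetical glock, for illustration):
 * for ln_type = 2 and ln_number = 0x1234, strname becomes
 * "       2            1234" - an 8 character hex type field followed
 * by a 16 character hex number field, space padded on the left.  This
 * matches the "%8x%16x" format used by sync_lock() below.
 */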

static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to call dlm if we've unmounted the lock protocol */
	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
		gfs2_glock_free(gl);
		return;
	}
	/* don't want to skip dlm_unlock writing the lvb when lock has one */

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    !gl->gl_lksb.sb_lvbptr) {
		gfs2_glock_free(gl);
		return;
	}

	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
		return;
	}
}

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  0. gfs2 checks for another cluster node withdraw, needing journal replay
 *  1. dlm_controld sees lockspace members change
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure.  gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15.  This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9).  This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start.  So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set.  (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 *  3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 *  6. recover_slot records any failed jids (maybe none)
 *  9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again) then do nothing, otherwise if recover_start > recover_block
 *     then clear BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs.  The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning.  The mount in step 4 waits until the recovery in
 * step 1 is done.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs.  (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.)  This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 * Each node holds the mounted_lock in PR while it's mounted.
 * Each node tries to acquire the mounted_lock in EX when it mounts.
 * If a node is granted the mounted_lock EX it means there are no
 * other mounted nodes (no PR locks exist), and it is the first mounter.
 * The mounted_lock is demoted to PR when first recovery is done, so
 * others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 * mounter is doing first mount recovery of all journals.
 * A mounting node needs to acquire control_lock in EX mode before
 * it can proceed.  The first mounter holds control_lock in EX while doing
 * the first mount recovery, blocking mounts from other nodes, then demotes
 * control_lock to NL when it's done (others_may_mount/first_done),
 * allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters' PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks.  It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes of generation number: the latest dlm lockspace generation number
 * from the recover_done callback.  Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */

#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */
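
/*
 * Example lvb contents (hypothetical values, for illustration):
 * generation 7 with jids 0 and 3 needing recovery gives
 * lvb[0..3] = 07 00 00 00 (le32 generation), lvb[4..7] = 0 (unused),
 * lvb[8] = 0x09 (bits 0 and 3 of the little-endian jid bitmap).
 */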

static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}

/**
 * remote_withdraw - react to a node withdrawing from the file system
 * @sdp: The superblock
 */
static void remote_withdraw(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd;
	int ret = 0, count = 0;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
			continue;
		ret = gfs2_recover_journal(jd, true);
		if (ret)
			break;
		count++;
	}

	/* Report how many journals were checked and the last result */
	fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
}

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	/* First check for other nodes that may have done a withdraw. */
	if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
		remote_withdraw(sdp);
		clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
		return;
	}

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}
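
/*
 * Illustration of the generation comparison above (hypothetical
 * numbers): if the lvb holds generation 4 but recover_done has
 * advanced start_gen to 5, failed slots recorded for generation 5
 * are not yet reflected in the lvb, so their bits are set and the
 * lvb is rewritten with generation 5.  If lvb_gen is already 5,
 * the bits are in place and only stale recover_submit entries are
 * cleared.
 */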

static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	/*
	 * If we're a spectator, we don't want to take the lock in EX because
	 * we cannot do the first-mount responsibility it implies: recovery.
	 */
	if (sdp->sd_args.ar_spectator)
		goto locks_done;

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		if (sdp->sd_args.ar_spectator) {
			fs_info(sdp, "Recovery is required. Waiting for a "
				"non-spectator to mount.\n");
			msleep_interruptible(1000);
		} else {
			fs_info(sdp, "control_mount wait1 block %u start %u "
				"mount %u lvb %u flags %lx\n", block_gen,
				start_gen, mount_gen, lvb_gen,
				ls->ls_recover_flags);
		}
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}

static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery.  We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun /*
1044*4882a593Smuzhiyun  * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
1045*4882a593Smuzhiyun  * to accomodate the largest slot number.  (NB dlm slot numbers start at 1,
1046*4882a593Smuzhiyun  * gfs2 jids start at 0, so jid = slot - 1)
1047*4882a593Smuzhiyun  */

#define RECOVER_SIZE_INC 16

static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;
	new_size = old_size;
	while (new_size < max_jid + 1)
		new_size += RECOVER_SIZE_INC;
	if (new_size == old_size)
		return 0;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}
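
	/*
	 * The arrays are allocated above, outside ls_recover_spin, because
	 * kcalloc(GFP_NOFS) may sleep; only the pointer swap below runs
	 * under the spinlock.
	 */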
	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
}

/* dlm calls before it does lock recovery */

static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recover_prep ignored due to withdraw.\n");
		return;
	}
	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_prep has been completed on all lockspace members;
   identifies slot/jid of failed member */

static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
		       jid);
		return;
	}
	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
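	/*
	 * Record the generation in which this jid failed; the control work
	 * (gfs2_control_func) reads ls_recover_submit and requests journal
	 * recovery for the failed jids.
	 */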
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_slot and after it completes lock recovery */

static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recover_done ignored due to withdraw.\n");
		return;
	}
	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

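	/*
	 * The first recover_done after joining the lockspace assigns our jid
	 * from the dlm slot number (slots are 1-based, jids 0-based).
	 */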
	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

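	/*
	 * Pairs with the wait_on_bit(DFL_DLM_RECOVERY) in control_first_done():
	 * the barrier ensures the bit is seen as cleared before waiters wake.
	 */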
	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}

/* gfs2_recover thread has a journal recovery result */

static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
		       jid);
		return;
	}
	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of our own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/* GAVEUP means another node is recovering the journal; delay our
	   next attempt to recover it, to give the other node a chance to
	   finish before trying again */

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};

static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */

	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * prepare dlm_new_lockspace args
	 */

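	/*
	 * "table" has the form "clustername:fsname"; the part before the
	 * colon names the cluster, the part after it the lockspace.
	 */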
	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */

	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support ops callbacks,
		 * old dlm_controld/gfs_controld are used, try without ops.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */

	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this mount */

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);
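	/*
	 * DFL_UNMOUNT prevents recover_done/recovery_result from requeueing
	 * sd_control_work, so the flush above leaves nothing pending.
	 */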

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};