xref: /OK3568_Linux_fs/kernel/fs/ext4/mmp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/fs.h>
3*4882a593Smuzhiyun #include <linux/random.h>
4*4882a593Smuzhiyun #include <linux/buffer_head.h>
5*4882a593Smuzhiyun #include <linux/utsname.h>
6*4882a593Smuzhiyun #include <linux/kthread.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include "ext4.h"
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun /* Checksumming functions */
ext4_mmp_csum(struct super_block * sb,struct mmp_struct * mmp)11*4882a593Smuzhiyun static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
12*4882a593Smuzhiyun {
13*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(sb);
14*4882a593Smuzhiyun 	int offset = offsetof(struct mmp_struct, mmp_checksum);
15*4882a593Smuzhiyun 	__u32 csum;
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun 	csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun 	return cpu_to_le32(csum);
20*4882a593Smuzhiyun }
21*4882a593Smuzhiyun 
ext4_mmp_csum_verify(struct super_block * sb,struct mmp_struct * mmp)22*4882a593Smuzhiyun static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun 	if (!ext4_has_metadata_csum(sb))
25*4882a593Smuzhiyun 		return 1;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
28*4882a593Smuzhiyun }
29*4882a593Smuzhiyun 
ext4_mmp_csum_set(struct super_block * sb,struct mmp_struct * mmp)30*4882a593Smuzhiyun static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	if (!ext4_has_metadata_csum(sb))
33*4882a593Smuzhiyun 		return;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun  * Write the MMP block using REQ_SYNC to try to get the block on-disk
40*4882a593Smuzhiyun  * faster.
41*4882a593Smuzhiyun  */
/*
 * Write the MMP block using REQ_SYNC to try to get the block on-disk
 * faster.
 *
 * Returns 0 on success, -EIO if the synchronous write did not complete
 * successfully (buffer not uptodate after wait_on_buffer()).
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);

	/*
	 * We protect against freezing so that we don't create dirty buffers
	 * on frozen filesystem.
	 */
	sb_start_write(sb);
	/* Refresh the checksum before handing the buffer to the block layer. */
	ext4_mmp_csum_set(sb, mmp);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	/* Extra reference for the in-flight I/O; dropped by end_buffer_write_sync. */
	get_bh(bh);
	submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
	/* Block until the write completes; MMP relies on it being on disk. */
	wait_on_buffer(bh);
	sb_end_write(sb);
	if (unlikely(!buffer_uptodate(bh)))
		return -EIO;

	return 0;
}
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun /*
65*4882a593Smuzhiyun  * Read the MMP block. It _must_ be read from disk and hence we clear the
66*4882a593Smuzhiyun  * uptodate flag on the buffer.
67*4882a593Smuzhiyun  */
/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 *
 * On success *bh holds a referenced, validated buffer and 0 is returned.
 * On any failure *bh is released and set to NULL, and a negative errno
 * is returned (-ENOMEM, I/O error, -EFSCORRUPTED on bad magic, or
 * -EFSBADCRC on checksum mismatch).
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
			  ext4_fsblk_t mmp_block)
{
	struct mmp_struct *mmp;
	int ret;

	/* Force a re-read from the device even if we already hold the buffer. */
	if (*bh)
		clear_buffer_uptodate(*bh);

	/* This would be sb_bread(sb, mmp_block), except we need to be sure
	 * that the MD RAID device cache has been bypassed, and that the read
	 * is not blocked in the elevator. */
	if (!*bh) {
		*bh = sb_getblk(sb, mmp_block);
		if (!*bh) {
			ret = -ENOMEM;
			goto warn_exit;
		}
	}

	/* ext4_read_bh() submits the read and unlocks the buffer when done. */
	lock_buffer(*bh);
	ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL);
	if (ret)
		goto warn_exit;

	mmp = (struct mmp_struct *)((*bh)->b_data);
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
		ret = -EFSCORRUPTED;
		goto warn_exit;
	}
	if (!ext4_mmp_csum_verify(sb, mmp)) {
		ret = -EFSBADCRC;
		goto warn_exit;
	}
	return 0;
warn_exit:
	/* brelse(NULL) is a no-op, so this is safe on the sb_getblk failure path. */
	brelse(*bh);
	*bh = NULL;
	ext4_warning(sb, "Error %d while reading MMP block %llu",
		     ret, mmp_block);
	return ret;
}
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun /*
112*4882a593Smuzhiyun  * Dump as much information as possible to help the admin.
113*4882a593Smuzhiyun  */
__dump_mmp_msg(struct super_block * sb,struct mmp_struct * mmp,const char * function,unsigned int line,const char * msg)114*4882a593Smuzhiyun void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
115*4882a593Smuzhiyun 		    const char *function, unsigned int line, const char *msg)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	__ext4_warning(sb, function, line, "%s", msg);
118*4882a593Smuzhiyun 	__ext4_warning(sb, function, line,
119*4882a593Smuzhiyun 		       "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
120*4882a593Smuzhiyun 		       (unsigned long long)le64_to_cpu(mmp->mmp_time),
121*4882a593Smuzhiyun 		       (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
122*4882a593Smuzhiyun 		       (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun  * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
127*4882a593Smuzhiyun  */
/*
 * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
 *
 * Kernel thread body.  Periodically bumps mmp_seq and rewrites the MMP
 * block so other nodes can detect that the filesystem is in use.  On a
 * clean exit it stamps EXT4_MMP_SEQ_CLEAN, then parks in wait_to_exit
 * until kthread_stop() is called (the buffer itself is released by
 * ext4_stop_mmpd(), not here).
 */
static int kmmpd(void *data)
{
	struct super_block *sb = (struct super_block *) data;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	/* Buffer is owned by the superblock (s_mmp_bh); freed in ext4_stop_mmpd(). */
	struct buffer_head *bh = EXT4_SB(sb)->s_mmp_bh;
	struct mmp_struct *mmp;
	ext4_fsblk_t mmp_block;
	u32 seq = 0;
	unsigned long failed_writes = 0;
	int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
	unsigned mmp_check_interval;
	unsigned long last_update_time;
	unsigned long diff;
	int retval;

	mmp_block = le64_to_cpu(es->s_mmp_block);
	mmp = (struct mmp_struct *)(bh->b_data);
	mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
	/*
	 * Start with the higher mmp_check_interval and reduce it if
	 * the MMP block is being updated on time.
	 */
	mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
				 EXT4_MMP_MIN_CHECK_INTERVAL);
	mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
	BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
	bdevname(bh->b_bdev, mmp->mmp_bdevname);

	/* Record which node holds the filesystem, for diagnostics on conflict. */
	memcpy(mmp->mmp_nodename, init_utsname()->nodename,
	       sizeof(mmp->mmp_nodename));

	while (!kthread_should_stop() && !sb_rdonly(sb)) {
		if (!ext4_has_feature_mmp(sb)) {
			ext4_warning(sb, "kmmpd being stopped since MMP feature"
				     " has been disabled.");
			goto wait_to_exit;
		}
		/* seq wraps within [1, EXT4_MMP_SEQ_MAX]; 0 and special values avoided. */
		if (++seq > EXT4_MMP_SEQ_MAX)
			seq = 1;

		mmp->mmp_seq = cpu_to_le32(seq);
		mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
		last_update_time = jiffies;

		retval = write_mmp_block(sb, bh);
		/*
		 * Don't spew too many error messages. Print one every
		 * (s_mmp_update_interval * 60) seconds.
		 */
		if (retval) {
			if ((failed_writes % 60) == 0) {
				ext4_error_err(sb, -retval,
					       "Error writing to MMP block");
			}
			failed_writes++;
		}

		/* Sleep out the remainder of the update interval, if any. */
		diff = jiffies - last_update_time;
		if (diff < mmp_update_interval * HZ)
			schedule_timeout_interruptible(mmp_update_interval *
						       HZ - diff);

		/*
		 * We need to make sure that more than mmp_check_interval
		 * seconds have not passed since writing. If that has happened
		 * we need to check if the MMP block is as we left it.
		 */
		diff = jiffies - last_update_time;
		if (diff > mmp_check_interval * HZ) {
			struct buffer_head *bh_check = NULL;
			struct mmp_struct *mmp_check;

			retval = read_mmp_block(sb, &bh_check, mmp_block);
			if (retval) {
				ext4_error_err(sb, -retval,
					       "error reading MMP data: %d",
					       retval);
				goto wait_to_exit;
			}

			/*
			 * Another node overwriting our seq/nodename means the
			 * filesystem was mounted elsewhere while we slept.
			 */
			mmp_check = (struct mmp_struct *)(bh_check->b_data);
			if (mmp->mmp_seq != mmp_check->mmp_seq ||
			    memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
				   sizeof(mmp->mmp_nodename))) {
				dump_mmp_msg(sb, mmp_check,
					     "Error while updating MMP info. "
					     "The filesystem seems to have been"
					     " multiply mounted.");
				ext4_error_err(sb, EBUSY, "abort");
				put_bh(bh_check);
				retval = -EBUSY;
				goto wait_to_exit;
			}
			put_bh(bh_check);
		}

		/*
		 * Adjust the mmp_check_interval depending on how much time
		 * it took for the MMP block to be written.
		 */
		mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
					     EXT4_MMP_MAX_CHECK_INTERVAL),
					 EXT4_MMP_MIN_CHECK_INTERVAL);
		mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
	}

	/*
	 * Unmount seems to be clean.
	 */
	mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
	mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());

	retval = write_mmp_block(sb, bh);

wait_to_exit:
	/*
	 * Park here until kthread_stop(); exiting earlier would race with
	 * ext4_stop_mmpd() touching the task struct and s_mmp_bh.
	 */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
	}
	set_current_state(TASK_RUNNING);
	return retval;
}
251*4882a593Smuzhiyun 
ext4_stop_mmpd(struct ext4_sb_info * sbi)252*4882a593Smuzhiyun void ext4_stop_mmpd(struct ext4_sb_info *sbi)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun 	if (sbi->s_mmp_tsk) {
255*4882a593Smuzhiyun 		kthread_stop(sbi->s_mmp_tsk);
256*4882a593Smuzhiyun 		brelse(sbi->s_mmp_bh);
257*4882a593Smuzhiyun 		sbi->s_mmp_tsk = NULL;
258*4882a593Smuzhiyun 	}
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun /*
262*4882a593Smuzhiyun  * Get a random new sequence number but make sure it is not greater than
263*4882a593Smuzhiyun  * EXT4_MMP_SEQ_MAX.
264*4882a593Smuzhiyun  */
mmp_new_seq(void)265*4882a593Smuzhiyun static unsigned int mmp_new_seq(void)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	u32 new_seq;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	do {
270*4882a593Smuzhiyun 		new_seq = prandom_u32();
271*4882a593Smuzhiyun 	} while (new_seq > EXT4_MMP_SEQ_MAX);
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	return new_seq;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun /*
277*4882a593Smuzhiyun  * Protect the filesystem from being mounted more than once.
278*4882a593Smuzhiyun  */
/*
 * Protect the filesystem from being mounted more than once.
 *
 * Protocol: read the MMP block; if it is not marked clean, wait roughly
 * one check interval and verify the sequence did not change (another
 * node updating it means the fs is mounted elsewhere).  Then write our
 * own random sequence, wait again, and re-verify it survived.  On
 * success the buffer is stashed in s_mmp_bh and kmmpd is started.
 *
 * Returns 0 on success, 1 on any failure (conflict, I/O error, or
 * interrupted wait).
 */
int ext4_multi_mount_protect(struct super_block *sb,
				    ext4_fsblk_t mmp_block)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *bh = NULL;
	struct mmp_struct *mmp = NULL;
	u32 seq;
	unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
	unsigned int wait_time = 0;
	int retval;

	/* Sanity-check the superblock's MMP block number before reading it. */
	if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
	    mmp_block >= ext4_blocks_count(es)) {
		ext4_warning(sb, "Invalid MMP block in superblock");
		goto failed;
	}

	retval = read_mmp_block(sb, &bh, mmp_block);
	if (retval)
		goto failed;

	mmp = (struct mmp_struct *)(bh->b_data);

	if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
		mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;

	/*
	 * If check_interval in MMP block is larger, use that instead of
	 * update_interval from the superblock.
	 */
	if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
		mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);

	/* A clean sequence means the last user unmounted cleanly: no wait needed. */
	seq = le32_to_cpu(mmp->mmp_seq);
	if (seq == EXT4_MMP_SEQ_CLEAN)
		goto skip;

	if (seq == EXT4_MMP_SEQ_FSCK) {
		dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
		goto failed;
	}

	/* Wait a bit more than one interval, capped at interval + 60s. */
	wait_time = min(mmp_check_interval * 2 + 1,
			mmp_check_interval + 60);

	/* Print MMP interval if more than 20 secs. */
	if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
		ext4_warning(sb, "MMP interval %u higher than expected, please"
			     " wait.\n", wait_time * 2);

	if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
		ext4_warning(sb, "MMP startup interrupted, failing mount\n");
		goto failed;
	}

	/* First verification pass: did anyone else bump the sequence? */
	retval = read_mmp_block(sb, &bh, mmp_block);
	if (retval)
		goto failed;
	mmp = (struct mmp_struct *)(bh->b_data);
	if (seq != le32_to_cpu(mmp->mmp_seq)) {
		dump_mmp_msg(sb, mmp,
			     "Device is already active on another node.");
		goto failed;
	}

skip:
	/*
	 * write a new random sequence number.
	 */
	seq = mmp_new_seq();
	mmp->mmp_seq = cpu_to_le32(seq);

	retval = write_mmp_block(sb, bh);
	if (retval)
		goto failed;

	/*
	 * wait for MMP interval and check mmp_seq.
	 */
	if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
		ext4_warning(sb, "MMP startup interrupted, failing mount");
		goto failed;
	}

	/* Second pass: our own sequence must still be on disk. */
	retval = read_mmp_block(sb, &bh, mmp_block);
	if (retval)
		goto failed;
	mmp = (struct mmp_struct *)(bh->b_data);
	if (seq != le32_to_cpu(mmp->mmp_seq)) {
		dump_mmp_msg(sb, mmp,
			     "Device is already active on another node.");
		goto failed;
	}

	/* Ownership of bh moves to the superblock; released in ext4_stop_mmpd(). */
	EXT4_SB(sb)->s_mmp_bh = bh;

	/*
	 * Start a kernel thread to update the MMP block periodically.
	 */
	EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%.*s",
					     (int)sizeof(mmp->mmp_bdevname),
					     bdevname(bh->b_bdev,
						      mmp->mmp_bdevname));
	if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
		EXT4_SB(sb)->s_mmp_tsk = NULL;
		ext4_warning(sb, "Unable to create kmmpd thread for %s.",
			     sb->s_id);
		goto failed;
	}

	return 0;

failed:
	/* read_mmp_block() NULLs bh on its own failures, so brelse() is safe here. */
	brelse(bh);
	return 1;
}
395