xref: /OK3568_Linux_fs/kernel/fs/ceph/mdsmap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/ceph/ceph_debug.h>
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include <linux/bug.h>
5*4882a593Smuzhiyun #include <linux/err.h>
6*4882a593Smuzhiyun #include <linux/random.h>
7*4882a593Smuzhiyun #include <linux/slab.h>
8*4882a593Smuzhiyun #include <linux/types.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/ceph/mdsmap.h>
11*4882a593Smuzhiyun #include <linux/ceph/messenger.h>
12*4882a593Smuzhiyun #include <linux/ceph/decode.h>
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include "super.h"
15*4882a593Smuzhiyun 
/*
 * An mds rank is "ready" iff it is up (state > 0) and, unless laggy
 * ranks are explicitly acceptable, not flagged laggy.
 *
 * Note: expands against a 'struct ceph_mdsmap *m' in scope at the use
 * site.  The parentheses around the ?: are required: without them the
 * state check would be folded into the condition ('state > 0 &&
 * ignore_laggy ? ...') and a down-but-not-laggy slot would count as
 * ready whenever ignore_laggy is false.
 */
#define CEPH_MDS_IS_READY(i, ignore_laggy) \
	(m->m_info[i].state > 0 && (ignore_laggy ? true : !m->m_info[i].laggy))
18*4882a593Smuzhiyun 
/*
 * Pick a uniformly random ready rank, or -1 if none qualifies.
 * When ignore_laggy is false only non-laggy ranks are considered.
 * Note: may return m->possible_max_rank if the second scan falls
 * through; callers treat that like "none found".
 */
static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
{
	int i, r, seen;
	int count = 0;

	/* how many ranks are eligible? */
	for (i = 0; i < m->possible_max_rank; i++)
		if (CEPH_MDS_IS_READY(i, ignore_laggy))
			count++;
	if (count == 0)
		return -1;

	/* stop at the (r + 1)-th eligible rank */
	r = prandom_u32() % count;
	seen = 0;
	for (i = 0; i < m->possible_max_rank; i++) {
		if (!CEPH_MDS_IS_READY(i, ignore_laggy))
			continue;
		if (seen++ == r)
			break;
	}

	return i;
}
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun  * choose a random mds that is "up" (i.e. has a state > 0), or -1.
45*4882a593Smuzhiyun  */
ceph_mdsmap_get_random_mds(struct ceph_mdsmap * m)46*4882a593Smuzhiyun int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	int mds;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	mds = __mdsmap_get_random_mds(m, false);
51*4882a593Smuzhiyun 	if (mds == m->possible_max_rank || mds == -1)
52*4882a593Smuzhiyun 		mds = __mdsmap_get_random_mds(m, true);
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	return mds == m->possible_max_rank ? -1 : mds;
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
/*
 * Skip a fixed-size encoded field of 'type', advancing *p past it, or
 * jump to 'bad' if fewer than sizeof(type) bytes remain before 'end'.
 */
#define __decode_and_drop_type(p, end, type, bad)		\
	do {							\
		if (*p + sizeof(type) > end)			\
			goto bad;				\
		*p += sizeof(type);				\
	} while (0)
63*4882a593Smuzhiyun 
/*
 * Skip an encoded set<type>: a u32 element count followed by that many
 * fixed-size elements.  Jumps to 'bad' on short buffer.
 *
 * NOTE(review): 'sizeof(type) * n' could wrap size_t on 32-bit for a
 * hostile n; presumably bounded by the message length — verify.
 */
#define __decode_and_drop_set(p, end, type, bad)		\
	do {							\
		u32 n;						\
		size_t need;					\
		ceph_decode_32_safe(p, end, n, bad);		\
		need = sizeof(type) * n;			\
		ceph_decode_need(p, end, need, bad);		\
		*p += need;					\
	} while (0)
73*4882a593Smuzhiyun 
/*
 * Skip an encoded map<ktype, vtype>: a u32 entry count followed by that
 * many fixed-size key/value pairs.  Jumps to 'bad' on short buffer.
 *
 * NOTE(review): same potential 32-bit size_t wrap as
 * __decode_and_drop_set for a hostile n — verify.
 */
#define __decode_and_drop_map(p, end, ktype, vtype, bad)	\
	do {							\
		u32 n;						\
		size_t need;					\
		ceph_decode_32_safe(p, end, n, bad);		\
		need = (sizeof(ktype) + sizeof(vtype)) * n;	\
		ceph_decode_need(p, end, need, bad);		\
		*p += need;					\
	} while (0)
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 
__decode_and_drop_compat_set(void ** p,void * end)85*4882a593Smuzhiyun static int __decode_and_drop_compat_set(void **p, void* end)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun 	int i;
88*4882a593Smuzhiyun 	/* compat, ro_compat, incompat*/
89*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
90*4882a593Smuzhiyun 		u32 n;
91*4882a593Smuzhiyun 		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
92*4882a593Smuzhiyun 		/* mask */
93*4882a593Smuzhiyun 		*p += sizeof(u64);
94*4882a593Smuzhiyun 		/* names (map<u64, string>) */
95*4882a593Smuzhiyun 		n = ceph_decode_32(p);
96*4882a593Smuzhiyun 		while (n-- > 0) {
97*4882a593Smuzhiyun 			u32 len;
98*4882a593Smuzhiyun 			ceph_decode_need(p, end, sizeof(u64) + sizeof(u32),
99*4882a593Smuzhiyun 					 bad);
100*4882a593Smuzhiyun 			*p += sizeof(u64);
101*4882a593Smuzhiyun 			len = ceph_decode_32(p);
102*4882a593Smuzhiyun 			ceph_decode_need(p, end, len, bad);
103*4882a593Smuzhiyun 			*p += len;
104*4882a593Smuzhiyun 		}
105*4882a593Smuzhiyun 	}
106*4882a593Smuzhiyun 	return 0;
107*4882a593Smuzhiyun bad:
108*4882a593Smuzhiyun 	return -1;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun /*
112*4882a593Smuzhiyun  * Decode an MDS map
113*4882a593Smuzhiyun  *
114*4882a593Smuzhiyun  * Ignore any fields we don't care about (there are quite a few of
115*4882a593Smuzhiyun  * them).
116*4882a593Smuzhiyun  */
/*
 * Returns a newly allocated map on success (caller owns it; release
 * with ceph_mdsmap_destroy()), or an ERR_PTR.  On success *p is
 * advanced to 'end'.
 */
struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
{
	struct ceph_mdsmap *m;
	const void *start = *p;	/* kept only for the corrupt-map hex dump */
	int i, j, n;
	int err;
	u8 mdsmap_v;
	u16 mdsmap_ev;

	m = kzalloc(sizeof(*m), GFP_NOFS);
	if (!m)
		return ERR_PTR(-ENOMEM);

	/* struct_v + struct_cv */
	ceph_decode_need(p, end, 1 + 1, bad);
	mdsmap_v = ceph_decode_8(p);
	*p += sizeof(u8);			/* mdsmap_cv */
	if (mdsmap_v >= 4) {
	       u32 mdsmap_len;
	       /* v4+ carries its own byte length; clamp 'end' to it */
	       ceph_decode_32_safe(p, end, mdsmap_len, bad);
	       if (end < *p + mdsmap_len)
		       goto bad;
	       end = *p + mdsmap_len;
	}

	/* 7 fixed u32 fields + u64 max_file_size + the mds_info count below */
	ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
	m->m_epoch = ceph_decode_32(p);
	m->m_client_epoch = ceph_decode_32(p);
	m->m_last_failure = ceph_decode_32(p);
	m->m_root = ceph_decode_32(p);
	m->m_session_timeout = ceph_decode_32(p);
	m->m_session_autoclose = ceph_decode_32(p);
	m->m_max_file_size = ceph_decode_64(p);
	m->m_max_mds = ceph_decode_32(p);

	/*
	 * Number of entries in the mds_info map.  m_num_active_mds may
	 * exceed m_max_mds while the cluster is decreasing max_mds;
	 * otherwise it is <= m_max_mds.
	 */
	m->m_num_active_mds = n = ceph_decode_32(p);

	/*
	 * The possible max rank.  It may exceed m_num_active_mds: e.g.
	 * with max_mds == 2, while a laggy MDS(0) is being replaced we
	 * can temporarily receive a map with one active entry, MDS(1),
	 * whose rank >= m_num_active_mds.
	 */
	m->possible_max_rank = max(m->m_num_active_mds, m->m_max_mds);

	m->m_info = kcalloc(m->possible_max_rank, sizeof(*m->m_info), GFP_NOFS);
	if (!m->m_info)
		goto nomem;

	/* pick out active nodes from mds_info (state > 0) */
	for (i = 0; i < n; i++) {
		u64 global_id;
		u32 namelen;
		s32 mds, inc, state;
		u8 info_v;
		void *info_end = NULL;
		struct ceph_entity_addr addr;
		u32 num_export_targets;
		void *pexport_targets = NULL;
		struct ceph_timespec laggy_since;
		struct ceph_mds_info *info;
		bool laggy;

		/* key (global_id) + mds_info_t struct_v */
		ceph_decode_need(p, end, sizeof(u64) + 1, bad);
		global_id = ceph_decode_64(p);
		info_v= ceph_decode_8(p);
		if (info_v >= 4) {
			u32 info_len;
			/* v4+ entries are length-prefixed */
			ceph_decode_need(p, end, 1 + sizeof(u32), bad);
			*p += sizeof(u8);	/* info_cv */
			info_len = ceph_decode_32(p);
			info_end = *p + info_len;
			if (info_end > end)
				goto bad;
		}

		/* global_id (again, inside the entry) + name length */
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		*p += sizeof(u64);
		namelen = ceph_decode_32(p);  /* skip mds name */
		/*
		 * NOTE(review): namelen itself is not bounds-checked
		 * before the skip; *p may run past 'end' until the next
		 * ceph_decode_need catches it — verify that is safe for
		 * all callers of the decode helpers.
		 */
		*p += namelen;

		/* mds, inc, state, state_seq, addr, laggy_since, +2 skips */
		ceph_decode_need(p, end,
				 4*sizeof(u32) + sizeof(u64) +
				 sizeof(addr) + sizeof(struct ceph_timespec),
				 bad);
		mds = ceph_decode_32(p);
		inc = ceph_decode_32(p);
		state = ceph_decode_32(p);
		*p += sizeof(u64);		/* state_seq */
		err = ceph_decode_entity_addr(p, end, &addr);
		if (err)
			goto corrupt;
		ceph_decode_copy(p, &laggy_since, sizeof(laggy_since));
		/* a nonzero laggy_since timestamp marks the rank laggy */
		laggy = laggy_since.tv_sec != 0 || laggy_since.tv_nsec != 0;
		*p += sizeof(u32);		/* skip standby_for_rank */
		ceph_decode_32_safe(p, end, namelen, bad);
		*p += namelen;			/* skip standby_for_name */
		if (info_v >= 2) {
			/* remember where the target list starts; copy later */
			ceph_decode_32_safe(p, end, num_export_targets, bad);
			pexport_targets = *p;
			*p += num_export_targets * sizeof(u32);
		} else {
			num_export_targets = 0;
		}

		/* for length-prefixed entries, skip any trailing fields */
		if (info_end && *p != info_end) {
			if (*p > info_end)
				goto bad;
			*p = info_end;
		}

		dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
		     i+1, n, global_id, mds, inc,
		     ceph_pr_addr(&addr),
		     ceph_mds_state_name(state),
		     laggy ? "(laggy)" : "");

		/* ignore entries whose rank doesn't fit the info array */
		if (mds < 0 || mds >= m->possible_max_rank) {
			pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
			continue;
		}

		/* only active (state > 0) entries are kept */
		if (state <= 0) {
			dout("mdsmap_decode got incorrect state(%s)\n",
			     ceph_mds_state_name(state));
			continue;
		}

		info = &m->m_info[mds];
		info->global_id = global_id;
		info->state = state;
		info->addr = addr;
		info->laggy = laggy;
		info->num_export_targets = num_export_targets;
		if (num_export_targets) {
			info->export_targets = kcalloc(num_export_targets,
						       sizeof(u32), GFP_NOFS);
			if (!info->export_targets)
				goto nomem;
			for (j = 0; j < num_export_targets; j++)
				info->export_targets[j] =
				       ceph_decode_32(&pexport_targets);
		} else {
			info->export_targets = NULL;
		}
	}

	/* pg_pools */
	ceph_decode_32_safe(p, end, n, bad);
	m->m_num_data_pg_pools = n;
	m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS);
	if (!m->m_data_pg_pools)
		goto nomem;
	/* n data pools + the cas pool */
	ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
	for (i = 0; i < n; i++)
		m->m_data_pg_pools[i] = ceph_decode_64(p);
	m->m_cas_pg_pool = ceph_decode_64(p);
	/* default until an ev >= 8 map provides the real flag below */
	m->m_enabled = m->m_epoch > 1;

	/* extended (versioned) tail; on short data fall through to bad_ext,
	 * which keeps what was decoded so far rather than failing */
	mdsmap_ev = 1;
	if (mdsmap_v >= 2) {
		ceph_decode_16_safe(p, end, mdsmap_ev, bad_ext);
	}
	if (mdsmap_ev >= 3) {
		if (__decode_and_drop_compat_set(p, end) < 0)
			goto bad_ext;
	}
	/* metadata_pool */
	if (mdsmap_ev < 5) {
		__decode_and_drop_type(p, end, u32, bad_ext);
	} else {
		__decode_and_drop_type(p, end, u64, bad_ext);
	}

	/* created + modified + tableserver */
	__decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
	__decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
	__decode_and_drop_type(p, end, u32, bad_ext);

	/* in */
	{
		int num_laggy = 0;
		ceph_decode_32_safe(p, end, n, bad_ext);
		ceph_decode_need(p, end, sizeof(u32) * n, bad_ext);

		/* count how many "in" ranks we marked laggy above */
		for (i = 0; i < n; i++) {
			s32 mds = ceph_decode_32(p);
			if (mds >= 0 && mds < m->possible_max_rank) {
				if (m->m_info[mds].laggy)
					num_laggy++;
			}
		}
		m->m_num_laggy = num_laggy;

		/* the "in" set may name more ranks than we sized for */
		if (n > m->possible_max_rank) {
			void *new_m_info = krealloc(m->m_info,
						    n * sizeof(*m->m_info),
						    GFP_NOFS | __GFP_ZERO);
			if (!new_m_info)
				goto nomem;
			m->m_info = new_m_info;
		}
		m->possible_max_rank = n;
	}

	/* inc */
	__decode_and_drop_map(p, end, u32, u32, bad_ext);
	/* up */
	__decode_and_drop_map(p, end, u32, u64, bad_ext);
	/* failed */
	__decode_and_drop_set(p, end, u32, bad_ext);
	/* stopped */
	__decode_and_drop_set(p, end, u32, bad_ext);

	if (mdsmap_ev >= 4) {
		/* last_failure_osd_epoch */
		__decode_and_drop_type(p, end, u32, bad_ext);
	}
	if (mdsmap_ev >= 6) {
		/* ever_allowed_snaps */
		__decode_and_drop_type(p, end, u8, bad_ext);
		/* explicitly_allowed_snaps */
		__decode_and_drop_type(p, end, u8, bad_ext);
	}
	if (mdsmap_ev >= 7) {
		/* inline_data_enabled */
		__decode_and_drop_type(p, end, u8, bad_ext);
	}
	if (mdsmap_ev >= 8) {
		u32 name_len;
		/* enabled */
		ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
		/* fs name */
		ceph_decode_32_safe(p, end, name_len, bad_ext);
		ceph_decode_need(p, end, name_len, bad_ext);
		*p += name_len;
	}
	/* damaged */
	if (mdsmap_ev >= 9) {
		size_t need;
		ceph_decode_32_safe(p, end, n, bad_ext);
		need = sizeof(u32) * n;
		ceph_decode_need(p, end, need, bad_ext);
		*p += need;
		/* any damaged rank marks the whole map damaged */
		m->m_damaged = n > 0;
	} else {
		m->m_damaged = false;
	}
bad_ext:
	dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
	     !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
	*p = end;
	dout("mdsmap_decode success epoch %u\n", m->m_epoch);
	return m;
nomem:
	err = -ENOMEM;
	goto out_err;
corrupt:
	pr_err("corrupt mdsmap\n");
	print_hex_dump(KERN_DEBUG, "mdsmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
out_err:
	ceph_mdsmap_destroy(m);
	return ERR_PTR(err);
bad:
	err = -EINVAL;
	goto corrupt;
}
391*4882a593Smuzhiyun 
ceph_mdsmap_destroy(struct ceph_mdsmap * m)392*4882a593Smuzhiyun void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun 	int i;
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	if (m->m_info) {
397*4882a593Smuzhiyun 		for (i = 0; i < m->possible_max_rank; i++)
398*4882a593Smuzhiyun 			kfree(m->m_info[i].export_targets);
399*4882a593Smuzhiyun 		kfree(m->m_info);
400*4882a593Smuzhiyun 	}
401*4882a593Smuzhiyun 	kfree(m->m_data_pg_pools);
402*4882a593Smuzhiyun 	kfree(m);
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun 
ceph_mdsmap_is_cluster_available(struct ceph_mdsmap * m)405*4882a593Smuzhiyun bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
406*4882a593Smuzhiyun {
407*4882a593Smuzhiyun 	int i, nr_active = 0;
408*4882a593Smuzhiyun 	if (!m->m_enabled)
409*4882a593Smuzhiyun 		return false;
410*4882a593Smuzhiyun 	if (m->m_damaged)
411*4882a593Smuzhiyun 		return false;
412*4882a593Smuzhiyun 	if (m->m_num_laggy == m->m_num_active_mds)
413*4882a593Smuzhiyun 		return false;
414*4882a593Smuzhiyun 	for (i = 0; i < m->possible_max_rank; i++) {
415*4882a593Smuzhiyun 		if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
416*4882a593Smuzhiyun 			nr_active++;
417*4882a593Smuzhiyun 	}
418*4882a593Smuzhiyun 	return nr_active > 0;
419*4882a593Smuzhiyun }
420