/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/sizes.h>

#include "mtdcore.h"

#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

#define MTD_SIZE_REMAINING		(~0LLU)
#define MTD_OFFSET_NOT_SPECIFIED	(~0LLU)

bool mtd_partitions_used(struct mtd_info *master)
{
	struct mtd_info *slave;

	list_for_each_entry(slave, &master->partitions, node) {
		if (slave->usecount)
			return true;
	}

	return false;
}

/**
 * mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
 *                       with it and update the @mtdparts string pointer.
 *
 * The partition name is allocated and must be freed by the caller.
 *
 * This function is largely inspired by part_parse (mtdparts.c).
 *
 * @mtdparts: String describing the partition with mtdparts command syntax
 * @partition: MTD partition structure to fill
 *
 * @return 0 on success, an error otherwise.
 */
static int mtd_parse_partition(const char **_mtdparts,
			       struct mtd_partition *partition)
{
	const char *mtdparts = *_mtdparts;
	const char *name = NULL;
	int name_len;
	char *buf;

	/* Ensure the partition structure is empty */
	memset(partition, 0, sizeof(struct mtd_partition));

	/* Fetch the partition size */
	if (*mtdparts == '-') {
		/* Assign all remaining space to this partition */
		partition->size = MTD_SIZE_REMAINING;
		mtdparts++;
	} else {
		partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
		if (partition->size < SZ_4K) {
			printf("Minimum partition size 4kiB, %lldB requested\n",
			       partition->size);
			return -EINVAL;
		}
	}

	/* Check for the offset */
	partition->offset = MTD_OFFSET_NOT_SPECIFIED;
	if (*mtdparts == '@') {
		mtdparts++;
		partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
	}

	/* Now look for the name */
	if (*mtdparts == '(') {
		name = ++mtdparts;
		mtdparts = strchr(name, ')');
		if (!mtdparts) {
			printf("No closing ')' found in partition name\n");
			return -EINVAL;
		}
		name_len = mtdparts - name + 1;
		if ((name_len - 1) == 0) {
			printf("Empty partition name\n");
			return -EINVAL;
		}
		mtdparts++;
	} else {
		/* Name will be of the form size@offset */
		name_len = 22;
	}

	/* Check if the partition is read-only */
	if (strncmp(mtdparts, "ro", 2) == 0) {
		partition->mask_flags |= MTD_WRITEABLE;
		mtdparts += 2;
	}

	/* Check for a potential next partition definition */
	if (*mtdparts == ',') {
		if (partition->size == MTD_SIZE_REMAINING) {
			printf("No partitions allowed after a fill-up\n");
			return -EINVAL;
		}
		++mtdparts;
	} else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
		/* NOP */
	} else {
		printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
		return -EINVAL;
	}

	/*
	 * Allocate a buffer for the name and either copy the provided name or
	 * auto-generate it with the form 'size@offset'.
	 */
	buf = malloc(name_len);
	if (!buf)
		return -ENOMEM;

	if (name)
		strncpy(buf, name, name_len - 1);
	else
		snprintf(buf, name_len, "0x%08llx@0x%08llx",
			 partition->size, partition->offset);

	buf[name_len - 1] = '\0';
	partition->name = buf;

	*_mtdparts = mtdparts;

	return 0;
}
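
/*
 * Illustrative example (not from the original sources): a single-device
 * mtdparts fragment such as
 *
 *	"512k(u-boot)ro,0x100000@0x80000(env),-(rootfs)"
 *
 * is consumed by successive calls to mtd_parse_partition():
 *
 *	"512k(u-boot)ro"	-> size 0x80000, offset not specified,
 *				   name "u-boot", MTD_WRITEABLE masked out
 *	"0x100000@0x80000(env)"	-> size 0x100000, offset 0x80000, name "env"
 *	"-(rootfs)"		-> MTD_SIZE_REMAINING, gets whatever is left
 *
 * The partition names and sizes above are purely hypothetical.
 */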

/**
 * mtd_parse_partitions - Create a partition array from an mtdparts definition
 *
 * Stateless function that takes a @parent MTD device, a string @_mtdparts
 * describing the partitions (with the "mtdparts" command syntax) and creates
 * the corresponding MTD partition structure array @_parts. Both the name and
 * the partition structure itself must be freed; the caller may use
 * @mtd_free_parsed_partitions() for this purpose.
 *
 * @parent: MTD device which contains the partitions
 * @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
 *             command syntax.
 * @_parts: Allocated array containing the partitions, must be freed by the
 *          caller.
 * @_nparts: Size of @_parts array.
 *
 * @return 0 on success, an error otherwise.
 */
int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
			 struct mtd_partition **_parts, int *_nparts)
{
	struct mtd_partition partition = {}, *parts;
	const char *mtdparts = *_mtdparts;
	int cur_off = 0, cur_sz = 0;
	int nparts = 0;
	int ret, idx;
	u64 sz;

	/* First, iterate over the partitions until we know their number */
	while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
		ret = mtd_parse_partition(&mtdparts, &partition);
		if (ret)
			return ret;

		free((char *)partition.name);
		nparts++;
	}

	/* Allocate an array of partitions to give back to the caller */
	parts = malloc(sizeof(*parts) * nparts);
	if (!parts) {
		printf("Not enough space to save partitions meta-data\n");
		return -ENOMEM;
	}

	/* Iterate again over each partition to save the data in our array */
	for (idx = 0; idx < nparts; idx++) {
		ret = mtd_parse_partition(_mtdparts, &parts[idx]);
		if (ret)
			return ret;

		if (parts[idx].size == MTD_SIZE_REMAINING)
			parts[idx].size = parent->size - cur_sz;
		cur_sz += parts[idx].size;

		sz = parts[idx].size;
		if (sz < parent->writesize || do_div(sz, parent->writesize)) {
			printf("Partition size must be a multiple of %d\n",
			       parent->writesize);
			return -EINVAL;
		}

		if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
			parts[idx].offset = cur_off;
		cur_off += parts[idx].size;

		parts[idx].ecclayout = parent->ecclayout;
	}

	/* Offset by one mtdparts to point to the next device if any */
	if (*_mtdparts[0] == ';')
		(*_mtdparts)++;

	*_parts = parts;
	*_nparts = nparts;

	return 0;
}

/**
 * mtd_free_parsed_partitions - Free dynamically allocated partitions
 *
 * Each successful call to @mtd_parse_partitions must be followed by a call to
 * @mtd_free_parsed_partitions to free the partition array allocated during the
 * parsing process.
 *
 * @parts: Array containing the partitions that will be freed.
 * @nparts: Size of @parts array.
 */
void mtd_free_parsed_partitions(struct mtd_partition *parts,
				unsigned int nparts)
{
	int i;

	for (i = 0; i < nparts; i++)
		free((char *)parts[i].name);

	free(parts);
}
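
#if 0
/*
 * Minimal usage sketch (illustrative only, kept out of the build): parse an
 * mtdparts string for a parent device, register the partitions and release
 * the temporary array. The helper name and the abbreviated error handling
 * are hypothetical; add_mtd_partitions() is defined further down this file.
 */
static int example_register_mtdparts(struct mtd_info *parent,
				     const char *mtdparts)
{
	struct mtd_partition *parts;
	int nparts, ret;

	ret = mtd_parse_partitions(parent, &mtdparts, &parts, &nparts);
	if (ret)
		return ret;

	/* add_mtd_partitions() copies what it needs into new MTD devices */
	ret = add_mtd_partitions(parent, parts, nparts);

	/* The parsed array (names included) is no longer needed */
	mtd_free_parsed_partitions(parts, nparts);

	return ret;
}
#endif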

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_ecc_stats stats;
	int res;

	stats = mtd->parent->ecc_stats;
	res = mtd->parent->_read(mtd->parent, from + mtd->offset, len,
				 retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			mtd->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			mtd->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	return mtd->parent->_point(mtd->parent, from + mtd->offset, len,
				   retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	return mtd->parent->_unpoint(mtd->parent, from + mtd->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	offset += mtd->offset;
	return mtd->parent->_get_unmapped_area(mtd->parent, len, offset, flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = mtd->parent->_read_oob(mtd->parent, from + mtd->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_user_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_user_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_fact_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_fact_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	return mtd->parent->_write(mtd->parent, to + mtd->offset, len,
				   retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	return mtd->parent->_panic_write(mtd->parent, to + mtd->offset, len,
					 retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd->parent->_write_oob(mtd->parent, to + mtd->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_write_user_prot_reg(mtd->parent, from, len,
						 retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	return mtd->parent->_lock_user_prot_reg(mtd->parent, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	return mtd->parent->_writev(mtd->parent, vecs, count,
				    to + mtd->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int ret;

	instr->addr += mtd->offset;
	ret = mtd->parent->_erase(mtd->parent, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= mtd->offset;
		instr->addr -= mtd->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= instr->mtd->offset;
		instr->addr -= instr->mtd->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_lock(mtd->parent, ofs + mtd->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_unlock(mtd->parent, ofs + mtd->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_is_locked(mtd->parent, ofs + mtd->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	mtd->parent->_sync(mtd->parent);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	return mtd->parent->_suspend(mtd->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	mtd->parent->_resume(mtd->parent);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isreserved(mtd->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isbad(mtd->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int res;

	ofs += mtd->offset;
	res = mtd->parent->_block_markbad(mtd->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_info *p)
{
	kfree(p->name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object, recursively.
 */
static int do_del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_info *slave, *next;
	int ret, err = 0;

	list_for_each_entry_safe(slave, next, &master->partitions, node) {
		if (mtd_has_partitions(slave))
			del_mtd_partitions(slave);

		debug("Deleting %s MTD partition\n", slave->name);
		ret = del_mtd_device(slave);
		if (ret < 0) {
			printf("Error when deleting partition \"%s\" (%d)\n",
			       slave->name, ret);
			err = ret;
			continue;
		}

		list_del(&slave->node);
		free_partition(slave);
	}

	return err;
}

int del_mtd_partitions(struct mtd_info *master)
{
	int ret;

	debug("Deleting MTD partitions on \"%s\":\n", master->name);

	mutex_lock(&mtd_partitions_mutex);
	ret = do_del_mtd_partitions(master);
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}

static struct mtd_info *allocate_partition(struct mtd_info *master,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->type = master->type;
	slave->flags = master->flags & ~part->mask_flags;
	slave->size = part->size;
	slave->writesize = master->writesize;
	slave->writebufsize = master->writebufsize;
	slave->oobsize = master->oobsize;
	slave->oobavail = master->oobavail;
	slave->subpage_sft = master->subpage_sft;

	slave->name = name;
	slave->owner = master->owner;
#ifndef __UBOOT__
	slave->backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->dev.parent = master->dev.parent;
#endif

	if (master->_read)
		slave->_read = part_read;
	if (master->_write)
		slave->_write = part_write;

	if (master->_panic_write)
		slave->_panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->_point = part_point;
		slave->_unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->_get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->_read_oob = part_read_oob;
	if (master->_write_oob)
		slave->_write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->_read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->_read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->_write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->_lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->_get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->_get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->_sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->_suspend = part_suspend;
		slave->_resume = part_resume;
	}
	if (master->_writev)
		slave->_writev = part_writev;
#endif
	if (master->_lock)
		slave->_lock = part_lock;
	if (master->_unlock)
		slave->_unlock = part_unlock;
	if (master->_is_locked)
		slave->_is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->_block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->_block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->_block_markbad = part_block_markbad;
	slave->_erase = part_erase;
	slave->parent = master;
	slave->offset = part->offset;
	INIT_LIST_HEAD(&slave->partitions);
	INIT_LIST_HEAD(&slave->node);

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->size) {
			slave->size = master->size - slave->offset
							- slave->size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->size == MTDPART_SIZ_FULL)
		slave->size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->size), slave->name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->size > master->size) {
		slave->size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, master->name, slave->size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->erasesize < regions[i].erasesize)
				slave->erasesize = regions[i].erasesize;
		}
		WARN_ON(slave->erasesize == 0);
	} else {
		/* Single erase size */
		slave->erasesize = master->erasesize;
	}

	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, slave)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->size, slave)) {
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->ecclayout = master->ecclayout;
	slave->ecc_step_size = master->ecc_step_size;
	slave->ecc_strength = master->ecc_strength;
	slave->bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->ecc_stats.badblocks++;
			offs += slave->erasesize;
		}
	}

out_register:
	return slave;
}
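
/*
 * Worked example of the offset policies handled above (numbers are
 * illustrative only): with a 128KiB (0x20000) erase block and
 * cur_offset = 0x25000,
 *
 *	MTDPART_OFS_APPEND places the partition at 0x25000;
 *	MTDPART_OFS_NXTBLK rounds up to the next erase block, i.e. 0x40000;
 *	MTDPART_OFS_RETAIN places it at 0x25000 and shrinks it so that
 *	'size' bytes stay unused at the end of the parent device.
 */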

#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_info *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &master->partitions, node) {
		if (start >= p->offset &&
		    (start < (p->offset + p->size)))
			goto err_inv;

		if (end >= p->offset &&
		    (end < (p->offset + p->size)))
			goto err_inv;
	}

	list_add_tail(&new->node, &master->partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(new);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_info *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &master->partitions, node)
		if (slave->index == partno) {
			ret = del_mtd_device(slave);
			if (ret < 0)
				break;

			list_del(&slave->node);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
#endif

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *slave;
	uint64_t cur_offset = 0;
	int i;

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add_tail(&slave->node, &master->partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(slave);

		cur_offset = slave->offset + slave->size;
	}

	return 0;
}
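
#if 0
/*
 * Illustrative sketch (kept out of the build): registering a fixed layout
 * directly with add_mtd_partitions(). The device, names and sizes are
 * hypothetical.
 */
static const struct mtd_partition example_nor_parts[] = {
	{
		.name		= "u-boot",
		.offset		= 0,
		.size		= SZ_512K,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	}, {
		.name		= "env",
		.offset		= MTDPART_OFS_APPEND,	/* right after "u-boot" */
		.size		= SZ_128K,
	}, {
		.name		= "rootfs",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,	/* all remaining space */
	},
};

/* add_mtd_partitions(example_nor_mtd, example_nor_parts,
 *		      ARRAY_SIZE(example_nor_parts)); */
#endif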

#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
#endif
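
/*
 * Illustrative call sequence for the Linux-only helper above (hypothetical
 * driver code, not part of this file): let the default parsers run and
 * register whatever they find, e.g.
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(mtd, NULL, &parts, NULL);
 *
 *	if (nr > 0)
 *		add_mtd_partitions(mtd, parts, nr);
 *
 * Whether @data may be NULL depends on the parsers involved.
 */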

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (mtd_is_partition(mtd))
		return mtd->parent->size;

	return mtd->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);