/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
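/* This relies on 'mtd' being the first member of struct mtd_part. */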
#define PART(x)  ((struct mtd_part *)(x))


#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

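	/*
	 * Snapshot the master's ECC statistics so that only the delta
	 * produced by this read is credited to the partition's counters.
	 */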
	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

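/*
 * Note: the protection-register (OTP) accessors below intentionally pass
 * 'from' through without adding part->offset; protection registers live
 * in their own device-wide address space, not in the main flash array.
 */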
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

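	/*
	 * Translate into the master's address space; on failure, addr and
	 * fail_addr are translated back below so the caller sees
	 * partition-relative offsets.
	 */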
	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
#ifndef __UBOOT__
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
#endif

	if (master->_read)
		slave->mtd._read = part_read;
	if (master->_write)
		slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
#endif
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

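	/*
	 * Resolve the special offset request codes: MTDPART_OFS_APPEND
	 * places the partition right after the previous one,
	 * MTDPART_OFS_NXTBLK additionally rounds up to the next erase
	 * block, and MTDPART_OFS_RETAIN shrinks the partition so that
	 * 'size' bytes remain free at the end of the device.
	 */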
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize)
				slave->mtd.erasesize = regions[i].erasesize;
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

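	/*
	 * Seed the partition's bad-block count by scanning its address
	 * range on the master, one erase block at a time.
	 */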
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* only an absolute (direct) offset is accepted here */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

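	/* Reject the new partition if it overlaps an existing one. */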
	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
#endif

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

#ifdef __UBOOT__
	/*
	 * Need to init the list here, since the static LIST_HEAD()
	 * initializer does not work on platforms where relocation has
	 * problems (like MIPS & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);
#endif

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

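/*
 * Example (illustrative sketch, not part of this driver): a board file
 * with a probed master device could register a fixed layout like this,
 * where 'board_parts' and the sizes are hypothetical:
 *
 *	static const struct mtd_partition board_parts[] = {
 *		{ .name = "u-boot", .offset = 0, .size = 0x100000 },
 *		{ .name = "env", .offset = MTDPART_OFS_NXTBLK, .size = 0x20000 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 */
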
#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: if there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
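
/*
 * Example (illustrative sketch): a driver that wants the default parsers
 * ("cmdlinepart", then "ofpart") to discover the layout might do:
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr > 0)
 *		add_mtd_partitions(master, parts, nr);
 */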
#endif

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
838