xref: /rk3399_rockchip-uboot/drivers/mtd/mtdpart.c (revision 3de98b82f9097ef0106987bf3c6d694476416a71)
1 /*
2  * Simple MTD partitioning layer
3  *
4  * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
5  * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
6  * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
7  *
8  * SPDX-License-Identifier:	GPL-2.0+
9  *
10  */
11 
12 #ifndef __UBOOT__
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/list.h>
18 #include <linux/kmod.h>
19 #endif
20 
21 #include <common.h>
22 #include <malloc.h>
23 #include <linux/errno.h>
24 #include <linux/compat.h>
25 #include <ubi_uboot.h>
26 
27 #include <linux/mtd/mtd.h>
28 #include <linux/mtd/partitions.h>
29 #include <linux/err.h>
30 #include <linux/sizes.h>
31 
32 #include "mtdcore.h"
33 
34 /* Our partition linked list */
35 static LIST_HEAD(mtd_partitions);
36 #ifndef __UBOOT__
37 static DEFINE_MUTEX(mtd_partitions_mutex);
38 #else
39 DEFINE_MUTEX(mtd_partitions_mutex);
40 #endif
41 
42 /* Our partition node structure */
43 struct mtd_part {
44 	struct mtd_info mtd;
45 	struct mtd_info *master;
46 	uint64_t offset;
47 	struct list_head list;
48 };
49 
50 /*
51  * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
52  * the pointer to that structure with this macro.
53  */
54 #define PART(x)  ((struct mtd_part *)(x))
55 
56 
57 #ifdef __UBOOT__
58 /* from mm/util.c */
59 
60 /**
61  * kstrdup - allocate space for and copy an existing string
62  * @s: the string to duplicate
63  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
64  */
65 char *kstrdup(const char *s, gfp_t gfp)
66 {
67 	size_t len;
68 	char *buf;
69 
70 	if (!s)
71 		return NULL;
72 
73 	len = strlen(s) + 1;
74 	buf = kmalloc(len, gfp);
75 	if (buf)
76 		memcpy(buf, s, len);
77 	return buf;
78 }
79 #endif
80 
81 #define MTD_SIZE_REMAINING		(~0LLU)
82 #define MTD_OFFSET_NOT_SPECIFIED	(~0LLU)
83 
84 /**
85  * mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
86  *                       with it and update the @mtdparts string pointer.
87  *
88  * The partition name is allocated and must be freed by the caller.
89  *
90  * This function is widely inspired from part_parse (mtdparts.c).
91  *
92  * @mtdparts: String describing the partition with mtdparts command syntax
93  * @partition: MTD partition structure to fill
94  *
95  * @return 0 on success, an error otherwise.
96  */
static int mtd_parse_partition(const char **_mtdparts,
			       struct mtd_partition *partition)
{
	const char *mtdparts = *_mtdparts;
	const char *name = NULL;
	int name_len;
	char *buf;

	/* Ensure the partition structure is empty */
	memset(partition, 0, sizeof(struct mtd_partition));

	/* Fetch the partition size */
	if (*mtdparts == '-') {
		/* Assign all remaining space to this partition */
		partition->size = MTD_SIZE_REMAINING;
		mtdparts++;
	} else {
		/* ustrtoull advances mtdparts past the size token */
		partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
		if (partition->size < SZ_4K) {
			printf("Minimum partition size 4kiB, %lldB requested\n",
			       partition->size);
			return -EINVAL;
		}
	}

	/* Check for the offset; absent means "place after the previous one" */
	partition->offset = MTD_OFFSET_NOT_SPECIFIED;
	if (*mtdparts == '@') {
		mtdparts++;
		partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
	}

	/* Now look for the name, delimited as "(name)" */
	if (*mtdparts == '(') {
		name = ++mtdparts;
		mtdparts = strchr(name, ')');
		if (!mtdparts) {
			printf("No closing ')' found in partition name\n");
			return -EINVAL;
		}
		/* name_len counts the name characters plus the NUL */
		name_len = mtdparts - name + 1;
		if ((name_len - 1) == 0) {
			printf("Empty partition name\n");
			return -EINVAL;
		}
		mtdparts++;
	} else {
		/*
		 * Name will be of the form size@offset:
		 * "0x%08llx@0x%08llx" is at least 10 + 1 + 10 + NUL = 22 bytes.
		 */
		name_len = 22;
	}

	/*
	 * Check if the partition is read-only. MTD_WRITEABLE in mask_flags is
	 * later cleared from the slave's flags by allocate_partition().
	 */
	if (strncmp(mtdparts, "ro", 2) == 0) {
		partition->mask_flags |= MTD_WRITEABLE;
		mtdparts += 2;
	}

	/* Check for a potential next partition definition */
	if (*mtdparts == ',') {
		/* A fill-up ('-') partition must be the last one */
		if (partition->size == MTD_SIZE_REMAINING) {
			printf("No partitions allowed after a fill-up\n");
			return -EINVAL;
		}
		++mtdparts;
	} else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
		/* NOP: end of this device's list, or end of the whole string */
	} else {
		printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
		return -EINVAL;
	}

	/*
	 * Allocate a buffer for the name and either copy the provided name or
	 * auto-generate it with the form 'size@offset'. The buffer is owned by
	 * the caller, which must free() it.
	 */
	buf = malloc(name_len);
	if (!buf)
		return -ENOMEM;

	if (name)
		strncpy(buf, name, name_len - 1);
	else
		snprintf(buf, name_len, "0x%08llx@0x%08llx",
			 partition->size, partition->offset);

	/* Guarantee NUL-termination for both branches above */
	buf[name_len - 1] = '\0';
	partition->name = buf;

	/* Publish the advanced parse position only on success */
	*_mtdparts = mtdparts;

	return 0;
}
189 
190 /**
191  * mtd_parse_partitions - Create a partition array from an mtdparts definition
192  *
193  * Stateless function that takes a @parent MTD device, a string @_mtdparts
194  * describing the partitions (with the "mtdparts" command syntax) and creates
195  * the corresponding MTD partition structure array @_parts. Both the name and
196  * the structure partition itself must be freed freed, the caller may use
197  * @mtd_free_parsed_partitions() for this purpose.
198  *
199  * @parent: MTD device which contains the partitions
200  * @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
201  *             command syntax.
202  * @_parts: Allocated array containing the partitions, must be freed by the
203  *          caller.
204  * @_nparts: Size of @_parts array.
205  *
206  * @return 0 on success, an error otherwise.
207  */
208 int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
209 			 struct mtd_partition **_parts, int *_nparts)
210 {
211 	struct mtd_partition partition = {}, *parts;
212 	const char *mtdparts = *_mtdparts;
213 	int cur_off = 0, cur_sz = 0;
214 	int nparts = 0;
215 	int ret, idx;
216 	u64 sz;
217 
218 	/* First, iterate over the partitions until we know their number */
219 	while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
220 		ret = mtd_parse_partition(&mtdparts, &partition);
221 		if (ret)
222 			return ret;
223 
224 		free((char *)partition.name);
225 		nparts++;
226 	}
227 
228 	/* Allocate an array of partitions to give back to the caller */
229 	parts = malloc(sizeof(*parts) * nparts);
230 	if (!parts) {
231 		printf("Not enough space to save partitions meta-data\n");
232 		return -ENOMEM;
233 	}
234 
235 	/* Iterate again over each partition to save the data in our array */
236 	for (idx = 0; idx < nparts; idx++) {
237 		ret = mtd_parse_partition(_mtdparts, &parts[idx]);
238 		if (ret)
239 			return ret;
240 
241 		if (parts[idx].size == MTD_SIZE_REMAINING)
242 			parts[idx].size = parent->size - cur_sz;
243 		cur_sz += parts[idx].size;
244 
245 		sz = parts[idx].size;
246 		if (sz < parent->writesize || do_div(sz, parent->writesize)) {
247 			printf("Partition size must be a multiple of %d\n",
248 			       parent->writesize);
249 			return -EINVAL;
250 		}
251 
252 		if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
253 			parts[idx].offset = cur_off;
254 		cur_off += parts[idx].size;
255 
256 		parts[idx].ecclayout = parent->ecclayout;
257 	}
258 
259 	/* Offset by one mtdparts to point to the next device if any */
260 	if (*_mtdparts[0] == ';')
261 		(*_mtdparts)++;
262 
263 	*_parts = parts;
264 	*_nparts = nparts;
265 
266 	return 0;
267 }
268 
269 /**
270  * mtd_free_parsed_partitions - Free dynamically allocated partitions
271  *
272  * Each successful call to @mtd_parse_partitions must be followed by a call to
273  * @mtd_free_parsed_partitions to free any allocated array during the parsing
274  * process.
275  *
276  * @parts: Array containing the partitions that will be freed.
277  * @nparts: Size of @parts array.
278  */
279 void mtd_free_parsed_partitions(struct mtd_partition *parts,
280 				unsigned int nparts)
281 {
282 	int i;
283 
284 	for (i = 0; i < nparts; i++)
285 		free((char *)parts[i].name);
286 
287 	free(parts);
288 }
289 
290 /*
291  * MTD methods which simply translate the effective address and pass through
292  * to the _real_ device.
293  */
294 
/*
 * Read from a partition: rebase the offset onto the master device and
 * forward the per-read ECC statistics delta into the partition's counters.
 */
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	/* Snapshot the master's stats so only this read's delta is counted */
	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		/* Includes res == 0 reads; the delta is then simply zero */
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}
313 
314 #ifndef __UBOOT__
315 static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
316 		size_t *retlen, void **virt, resource_size_t *phys)
317 {
318 	struct mtd_part *part = PART(mtd);
319 
320 	return part->master->_point(part->master, from + part->offset, len,
321 				    retlen, virt, phys);
322 }
323 
324 static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
325 {
326 	struct mtd_part *part = PART(mtd);
327 
328 	return part->master->_unpoint(part->master, from + part->offset, len);
329 }
330 #endif
331 
332 static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
333 					    unsigned long len,
334 					    unsigned long offset,
335 					    unsigned long flags)
336 {
337 	struct mtd_part *part = PART(mtd);
338 
339 	offset += part->offset;
340 	return part->master->_get_unmapped_area(part->master, len, offset,
341 						flags);
342 }
343 
/*
 * OOB read on a partition: validate the request against the partition
 * bounds, rebase the offset and forward to the master device.
 */
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	/* Reject reads starting or extending past the partition end */
	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		/* Per-page OOB budget depends on the access mode */
		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		/* Number of pages remaining from 'from' to the partition end */
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		/* Fold per-read ECC events into the partition's counters */
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}
381 
/*
 * OTP (one-time-programmable) register accessors. Note that 'from' is
 * passed through without adding the partition offset — the OTP area is
 * addressed per-device, not per-partition (TODO confirm against mtdcore).
 */
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

/* Query layout of the user OTP area; straight passthrough to the master. */
static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

/* Read the factory-programmed OTP area; straight passthrough. */
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

/* Query layout of the factory OTP area; straight passthrough. */
static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}
413 
414 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
415 		size_t *retlen, const u_char *buf)
416 {
417 	struct mtd_part *part = PART(mtd);
418 	return part->master->_write(part->master, to + part->offset, len,
419 				    retlen, buf);
420 }
421 
422 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
423 		size_t *retlen, const u_char *buf)
424 {
425 	struct mtd_part *part = PART(mtd);
426 	return part->master->_panic_write(part->master, to + part->offset, len,
427 					  retlen, buf);
428 }
429 
/*
 * OOB write on a partition: bounds-check against the partition size,
 * then rebase the offset onto the master device.
 */
static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	/* Reject writes starting or extending past the partition end */
	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}
441 
/*
 * OTP write/lock passthroughs. As with the OTP reads above, 'from' is
 * not rebased by the partition offset — OTP addressing is device-global
 * (TODO confirm).
 */
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

/* Permanently lock part of the user OTP area; irreversible on real flash. */
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}
456 
457 #ifndef __UBOOT__
458 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
459 		unsigned long count, loff_t to, size_t *retlen)
460 {
461 	struct mtd_part *part = PART(mtd);
462 	return part->master->_writev(part->master, vecs, count,
463 				     to + part->offset, retlen);
464 }
465 #endif
466 
/*
 * Erase on a partition: rebase instr->addr onto the master; on a
 * synchronous failure, translate the addresses back so the caller sees
 * partition-relative values (the async path is handled by
 * mtd_erase_callback() below).
 */
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		/* Only translate fail_addr if the driver actually set it */
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
481 
/*
 * Completion hook for erase operations. If the erase ran on a partition
 * (detected by comparing the _erase handler), translate the addresses
 * back to partition-relative before invoking the user's callback.
 */
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	/* The callback is optional */
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
495 
/* Write-protect a partition-relative range; rebased onto the master. */
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

/* Remove write protection from a partition-relative range. */
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

/* Query the lock state of a partition-relative range. */
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

/* Flush pending operations; no offset involved, plain passthrough. */
static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}
519 
520 #ifndef __UBOOT__
/* Power-management passthrough: suspend the underlying master device. */
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

/* Power-management passthrough: resume the underlying master device. */
static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}
532 #endif
533 
/* Check whether the block at a partition-relative offset is reserved. */
static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

/* Check whether the block at a partition-relative offset is bad. */
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

/*
 * Mark the block at a partition-relative offset bad and, on success,
 * account for it in this partition's bad-block statistics.
 */
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}
559 
/*
 * Release a slave partition allocated by allocate_partition().
 * The name (kstrdup'ed there) must be freed before the container.
 */
static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}
565 
566 /*
567  * This function unregisters and destroy all slave MTD objects which are
568  * attached to the given master MTD object.
569  */
570 
int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	debug("Deleting MTD partitions on \"%s\":\n", master->name);

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				/* Remember the failure but keep going; the
				 * slave stays registered and is not freed. */
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	/* 0 if every slave was removed, else the last del_mtd_device error */
	return err;
}
593 
/*
 * Build a slave mtd_part for one entry of a partition table.
 *
 * @master: device the partition lives on
 * @part: caller-supplied partition description (name is copied)
 * @partno: index of the partition within the table (-1 for a dynamically
 *          added one, see mtd_add_partition)
 * @cur_offset: end offset of the previous partition, used to resolve the
 *              MTDPART_OFS_* placeholder offsets
 *
 * Returns the new slave or an ERR_PTR on allocation failure. Out-of-range
 * partitions are not an error: they are truncated or disabled (size 0) but
 * still returned, to preserve partition numbering.
 */
static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		/* kfree(NULL) is a no-op, so this handles either failure */
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	/* mask_flags removes capabilities, e.g. MTD_WRITEABLE for "ro" */
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
#ifndef __UBOOT__
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
#endif

	/*
	 * Install a part_* translation wrapper for every operation the
	 * master actually implements; absent ops stay NULL on the slave.
	 */
	if (master->_read)
		slave->mtd._read = part_read;
	if (master->_write)
		slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
#ifndef __UBOOT__
	/* Suspend/resume is wired only on the first partition of a device
	 * without a device class, mirroring the upstream kernel logic. */
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
			slave->mtd._suspend = part_suspend;
			slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
#endif
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	/* Resolve the symbolic MTDPART_OFS_* offsets */
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		/* OFS_RETAIN: size holds the amount to leave free at the end */
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase regions which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	/* Misaligned partitions are forced read-only to avoid partial-block
	 * erases spilling into a neighbour. */
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	/* ECC parameters are inherited unchanged from the master */
	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	/* Pre-scan the partition's range so ecc_stats.badblocks is accurate */
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}
798 
799 #ifndef __UBOOT__
/*
 * Dynamically add one partition to @master at an explicit @offset with
 * the given @length (MTDPART_SIZ_FULL means "to the end of the device").
 * Fails with -EINVAL if the new partition overlaps an existing one.
 */
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	/* partno -1: this partition is not part of a static table */
	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			/* Reject if either endpoint lands inside an existing
			 * partition.
			 * NOTE(review): this does not catch a new partition
			 * that fully encloses an existing one — confirm
			 * whether that case matters for callers. */
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	/* NOTE(review): add_mtd_device() result is ignored here */
	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
856 
/*
 * Remove the partition of @master whose MTD index is @partno.
 * Returns 0 on success, -EINVAL if no such partition exists, or the
 * del_mtd_device() error if unregistering failed.
 */
int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			/* On failure the slave stays registered and owned */
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
879 #endif
880 
881 /*
882  * This function, given a master MTD object and a partition table, creates
883  * and registers slave MTD objects which are bound to the master according to
884  * the partition definitions.
885  *
886  * We don't register the master, or expect the caller to have done so,
887  * for reasons of data integrity.
888  */
889 
int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

#ifdef __UBOOT__
	/*
	 * Need to init the list here, since LIST_INIT() does not
	 * work on platforms where relocation has problems (like MIPS
	 * & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);
#endif

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		/* NOTE(review): partitions created in earlier iterations stay
		 * registered if a later allocation fails — confirm intended */
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		/* Next MTDPART_OFS_APPEND/NXTBLK resolves relative to here */
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
926 
927 #ifndef __UBOOT__
928 static DEFINE_SPINLOCK(part_parser_lock);
929 static LIST_HEAD(part_parsers);
930 
/*
 * Look up a registered parser by name, taking a module reference on it.
 * Returns NULL if no parser matches or its module cannot be pinned;
 * pair with put_partition_parser() to drop the reference.
 */
static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}
947 
948 #define put_partition_parser(p) do { module_put((p)->owner); } while (0)
949 
/* Add a partition parser to the global list used by parse_mtd_partitions(). */
void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

/* Remove a previously registered partition parser from the global list. */
void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
965 
966 /*
967  * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
968  * are changing this array!
969  */
970 static const char * const default_mtd_part_types[] = {
971 	"cmdlinepart",
972 	"ofpart",
973 	NULL
974 };
975 
976 /**
977  * parse_mtd_partitions - parse MTD partitions
978  * @master: the master partition (describes whole MTD device)
979  * @types: names of partition parsers to try or %NULL
980  * @pparts: array of partitions found is returned here
981  * @data: MTD partition parser-specific data
982  *
983  * This function tries to find partition on MTD device @master. It uses MTD
984  * partition parsers, specified in @types. However, if @types is %NULL, then
985  * the default list of parsers is used. The default list contains only the
986  * "cmdlinepart" and "ofpart" parsers ATM.
987  * Note: If there are more then one parser in @types, the kernel only takes the
988  * partitions parsed out by the first parser.
989  *
990  * This function may return:
991  * o a negative error code in case of failure
992  * o zero if no partitions were found
993  * o a positive number of found partitions, in which case on exit @pparts will
994  *   point to an array containing this number of &struct mtd_info objects.
995  */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	/* Stop as soon as one parser reports partitions (ret > 0) */
	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		/* Parser not loaded yet: try to modprobe it, then retry */
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		/* Drop the module reference taken by get_partition_parser() */
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
1022 #endif
1023 
/*
 * Return 1 if @mtd is a slave partition registered on the global
 * partition list, 0 if it is a master (or unknown) device.
 */
int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		/* mtd is the embedded mtd_info of its mtd_part container */
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
1040 
1041 /* Returns the size of the entire flash chip */
1042 uint64_t mtd_get_device_size(const struct mtd_info *mtd)
1043 {
1044 	if (!mtd_is_partition(mtd))
1045 		return mtd->size;
1046 
1047 	return PART(mtd)->master->size;
1048 }
1049 EXPORT_SYMBOL_GPL(mtd_get_device_size);
1050