/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/sizes.h>

#include "mtdcore.h"

#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

#define MTD_SIZE_REMAINING		(~0LLU)
#define MTD_OFFSET_NOT_SPECIFIED	(~0LLU)

bool mtd_partitions_used(struct mtd_info *master)
{
	struct mtd_info *slave;

	list_for_each_entry(slave, &master->partitions, node) {
		if (slave->usecount)
			return true;
	}

	return false;
}

/**
 * mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
 *                       with it and update the @mtdparts string pointer.
 *
 * The partition name is allocated and must be freed by the caller.
 *
 * This function is largely inspired by part_parse() (mtdparts.c).
 *
 * @mtdparts: String describing the partition with mtdparts command syntax
 * @partition: MTD partition structure to fill
 *
 * @return 0 on success, an error otherwise.
 */
static int mtd_parse_partition(const char **_mtdparts,
			       struct mtd_partition *partition)
{
	const char *mtdparts = *_mtdparts;
	const char *name = NULL;
	int name_len;
	char *buf;

	/* Ensure the partition structure is empty */
	memset(partition, 0, sizeof(struct mtd_partition));

	/* Fetch the partition size */
	if (*mtdparts == '-') {
		/* Assign all remaining space to this partition */
		partition->size = MTD_SIZE_REMAINING;
		mtdparts++;
	} else {
		partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
		if (partition->size < SZ_4K) {
			printf("Minimum partition size 4kiB, %lldB requested\n",
			       partition->size);
			return -EINVAL;
		}
	}

	/* Check for the offset */
	partition->offset = MTD_OFFSET_NOT_SPECIFIED;
	if (*mtdparts == '@') {
		mtdparts++;
		partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
	}

	/* Now look for the name */
	if (*mtdparts == '(') {
		name = ++mtdparts;
		mtdparts = strchr(name, ')');
		if (!mtdparts) {
			printf("No closing ')' found in partition name\n");
			return -EINVAL;
		}
		name_len = mtdparts - name + 1;
		if ((name_len - 1) == 0) {
			printf("Empty partition name\n");
			return -EINVAL;
		}
		mtdparts++;
	} else {
		/* Name will be of the form size@offset */
		name_len = 22;
	}

	/* Check if the partition is read-only */
	if (strncmp(mtdparts, "ro", 2) == 0) {
		partition->mask_flags |= MTD_WRITEABLE;
		mtdparts += 2;
	}

	/* Check for a potential next partition definition */
	if (*mtdparts == ',') {
		if (partition->size == MTD_SIZE_REMAINING) {
			printf("No partitions allowed after a fill-up\n");
			return -EINVAL;
		}
		++mtdparts;
	} else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
		/* NOP */
	} else {
		printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
		return -EINVAL;
	}

	/*
	 * Allocate a buffer for the name and either copy the provided name or
	 * auto-generate it with the form 'size@offset'.
	 */
	buf = malloc(name_len);
	if (!buf)
		return -ENOMEM;

	if (name)
		strncpy(buf, name, name_len - 1);
	else
		snprintf(buf, name_len, "0x%08llx@0x%08llx",
			 partition->size, partition->offset);

	buf[name_len - 1] = '\0';
	partition->name = buf;

	*_mtdparts = mtdparts;

	return 0;
}

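/*
 * Illustrative examples of single-partition definitions accepted above
 * (the names and sizes are made up, not taken from a real board):
 *
 *	"512k(spl)ro"			512 KiB read-only partition "spl"
 *	"0x100000@0x80000(env)"		1 MiB partition "env" at offset 512 KiB
 *	"-(rootfs)"			partition "rootfs" filling the remaining space
 *
 * When no "(name)" field is given, a name of the form "size@offset" is
 * generated automatically.
 */
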
/**
 * mtd_parse_partitions - Create a partition array from an mtdparts definition
 *
 * Stateless function that takes a @parent MTD device, a string @_mtdparts
 * describing the partitions (with the "mtdparts" command syntax) and creates
 * the corresponding MTD partition structure array @_parts. Both the names and
 * the partition structures themselves must be freed; the caller may use
 * @mtd_free_parsed_partitions() for this purpose.
 *
 * @parent: MTD device which contains the partitions
 * @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
 *             command syntax.
 * @_parts: Allocated array containing the partitions, must be freed by the
 *          caller.
 * @_nparts: Size of @_parts array.
 *
 * @return 0 on success, an error otherwise.
 */
int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
			 struct mtd_partition **_parts, int *_nparts)
{
	struct mtd_partition partition = {}, *parts;
	const char *mtdparts = *_mtdparts;
	int cur_off = 0, cur_sz = 0;
	int nparts = 0;
	int ret, idx;
	u64 sz;

	/* First, iterate over the partitions until we know their number */
	while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
		ret = mtd_parse_partition(&mtdparts, &partition);
		if (ret)
			return ret;

		free((char *)partition.name);
		nparts++;
	}

	/* Allocate an array of partitions to give back to the caller */
	parts = malloc(sizeof(*parts) * nparts);
	if (!parts) {
		printf("Not enough space to save partitions meta-data\n");
		return -ENOMEM;
	}

	/* Iterate again over each partition to save the data in our array */
	for (idx = 0; idx < nparts; idx++) {
		ret = mtd_parse_partition(_mtdparts, &parts[idx]);
		if (ret)
			return ret;

		if (parts[idx].size == MTD_SIZE_REMAINING)
			parts[idx].size = parent->size - cur_sz;
		cur_sz += parts[idx].size;

		sz = parts[idx].size;
		if (sz < parent->writesize || do_div(sz, parent->writesize)) {
			printf("Partition size must be a multiple of %d\n",
			       parent->writesize);
			return -EINVAL;
		}

		if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
			parts[idx].offset = cur_off;
		cur_off += parts[idx].size;

		parts[idx].ecclayout = parent->ecclayout;
	}

	/* Offset by one mtdparts to point to the next device if any */
	if (*_mtdparts[0] == ';')
		(*_mtdparts)++;

	*_parts = parts;
	*_nparts = nparts;

	return 0;
}

/**
 * mtd_free_parsed_partitions - Free dynamically allocated partitions
 *
 * Each successful call to @mtd_parse_partitions must be followed by a call to
 * @mtd_free_parsed_partitions to free any array allocated during the parsing
 * process.
 *
 * @parts: Array containing the partitions that will be freed.
 * @nparts: Size of @parts array.
 */
void mtd_free_parsed_partitions(struct mtd_partition *parts,
				unsigned int nparts)
{
	int i;

	for (i = 0; i < nparts; i++)
		free((char *)parts[i].name);

	free(parts);
}

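/*
 * Minimal usage sketch (illustrative only; the mtdparts string, the 'master'
 * device and the error handling are assumptions, not taken from a caller in
 * this file):
 *
 *	const char *mtdparts = "512k(spl)ro,0x100000(u-boot),-(rootfs)";
 *	struct mtd_partition *parts;
 *	int nparts, ret;
 *
 *	ret = mtd_parse_partitions(master, &mtdparts, &parts, &nparts);
 *	if (!ret) {
 *		ret = add_mtd_partitions(master, parts, nparts);
 *		mtd_free_parsed_partitions(parts, nparts);
 *	}
 */
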
/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char *buf)
{
	struct mtd_ecc_stats stats;
	int res;

	stats = mtd->parent->ecc_stats;
	res = mtd->parent->_read(mtd->parent, from + mtd->offset, len,
				 retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			mtd->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			mtd->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, void **virt, resource_size_t *phys)
{
	return mtd->parent->_point(mtd->parent, from + mtd->offset, len,
				   retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	return mtd->parent->_unpoint(mtd->parent, from + mtd->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	offset += mtd->offset;
	return mtd->parent->_get_unmapped_area(mtd->parent, len, offset, flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = mtd->parent->_read_oob(mtd->parent, from + mtd->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_user_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_user_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_read_fact_prot_reg(mtd->parent, from, len,
						retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	return mtd->parent->_get_fact_prot_info(mtd->parent, len, retlen,
						buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		      size_t *retlen, const u_char *buf)
{
	return mtd->parent->_write(mtd->parent, to + mtd->offset, len,
				   retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	return mtd->parent->_panic_write(mtd->parent, to + mtd->offset, len,
					 retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd->parent->_write_oob(mtd->parent, to + mtd->offset, ops);
}

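/*
 * Worked example for the wrappers above (numbers are illustrative): for a
 * partition at offset 0x100000 with size 0x80000 on its parent device, a
 * part_write_oob() at 'to' = 0x2000 is forwarded to the parent at offset
 * 0x102000, while 'to' = 0x80000 or beyond is rejected with -EINVAL since
 * the bounds are checked against the partition size, not the parent size.
 */
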
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf)
{
	return mtd->parent->_write_user_prot_reg(mtd->parent, from, len,
						 retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
				   size_t len)
{
	return mtd->parent->_lock_user_prot_reg(mtd->parent, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		       unsigned long count, loff_t to, size_t *retlen)
{
	return mtd->parent->_writev(mtd->parent, vecs, count,
				    to + mtd->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int ret;

	instr->addr += mtd->offset;
	ret = mtd->parent->_erase(mtd->parent, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= mtd->offset;
		instr->addr -= mtd->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= instr->mtd->offset;
		instr->addr -= instr->mtd->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_lock(mtd->parent, ofs + mtd->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_unlock(mtd->parent, ofs + mtd->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->parent->_is_locked(mtd->parent, ofs + mtd->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	mtd->parent->_sync(mtd->parent);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	return mtd->parent->_suspend(mtd->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	mtd->parent->_resume(mtd->parent);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isreserved(mtd->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	ofs += mtd->offset;
	return mtd->parent->_block_isbad(mtd->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int res;

	ofs += mtd->offset;
	res = mtd->parent->_block_markbad(mtd->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_info *p)
{
	kfree(p->name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object, recursively.
 */
static int do_del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_info *slave, *next;
	int ret, err = 0;

	list_for_each_entry_safe(slave, next, &master->partitions, node) {
		if (mtd_has_partitions(slave))
			del_mtd_partitions(slave);

		debug("Deleting %s MTD partition\n", slave->name);
		ret = del_mtd_device(slave);
		if (ret < 0) {
			printf("Error when deleting partition \"%s\" (%d)\n",
			       slave->name, ret);
			err = ret;
			continue;
		}

		list_del(&slave->node);
		free_partition(slave);
	}

	return err;
}

int del_mtd_partitions(struct mtd_info *master)
{
	int ret;

	debug("Deleting MTD partitions on \"%s\":\n", master->name);

	mutex_lock(&mtd_partitions_mutex);
	ret = do_del_mtd_partitions(master);
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}

static struct mtd_info *allocate_partition(struct mtd_info *master,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->type = master->type;
	slave->flags = master->flags & ~part->mask_flags;
	slave->size = part->size;
	slave->writesize = master->writesize;
	slave->writebufsize = master->writebufsize;
	slave->oobsize = master->oobsize;
	slave->oobavail = master->oobavail;
	slave->subpage_sft = master->subpage_sft;

	slave->name = name;
	slave->owner = master->owner;
#ifndef __UBOOT__
	slave->backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->dev.parent = master->dev.parent;
#endif

	if (master->_read)
		slave->_read = part_read;
	if (master->_write)
		slave->_write = part_write;

	if (master->_panic_write)
		slave->_panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->_point = part_point;
		slave->_unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->_get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->_read_oob = part_read_oob;
	if (master->_write_oob)
		slave->_write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->_read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->_read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->_write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->_lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->_get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->_get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->_sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->_suspend = part_suspend;
		slave->_resume = part_resume;
	}
	if (master->_writev)
		slave->_writev = part_writev;
#endif
	if (master->_lock)
		slave->_lock = part_lock;
	if (master->_unlock)
		slave->_unlock = part_unlock;
	if (master->_is_locked)
		slave->_is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->_block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->_block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->_block_markbad = part_block_markbad;
	slave->_erase = part_erase;
	slave->parent = master;
	slave->offset = part->offset;
	INIT_LIST_HEAD(&slave->partitions);
	INIT_LIST_HEAD(&slave->node);

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			      "0x%012llx -> 0x%012llx\n", partno,
			      (unsigned long long)cur_offset,
			      (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->size) {
			slave->size = master->size - slave->offset
				      - slave->size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
			      part->name, master->size - slave->offset,
			      slave->size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->size == MTDPART_SIZ_FULL)
		slave->size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->size), slave->name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
		       part->name);
		goto out_register;
	}
	if (slave->offset + slave->size > master->size) {
		slave->size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, master->name, slave->size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->erasesize < regions[i].erasesize)
				slave->erasesize = regions[i].erasesize;
		}
		WARN_ON(slave->erasesize == 0);
	} else {
		/* Single erase size */
		slave->erasesize = master->erasesize;
	}

	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, slave)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
		       part->name);
	}
	if ((slave->flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->size, slave)) {
		slave->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
		       part->name);
	}

	slave->ecclayout = master->ecclayout;
	slave->ecc_step_size = master->ecc_step_size;
	slave->ecc_strength = master->ecc_strength;
	slave->bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->ecc_stats.badblocks++;
			offs += slave->erasesize;
		}
	}

out_register:
	return slave;
}

#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_info *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &master->partitions, node) {
		if (start >= p->offset &&
		    (start < (p->offset + p->size)))
			goto err_inv;

		if (end >= p->offset &&
		    (end < (p->offset + p->size)))
			goto err_inv;
	}

	list_add_tail(&new->node, &master->partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(new);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
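/*
 * Illustrative use of mtd_add_partition() and mtd_del_partition() on
 * non-U-Boot builds (error handling omitted; the name, offset, size and
 * 'partno' index are made up): carve a 1 MiB partition out of 'master',
 * then remove it again by its mtd index:
 *
 *	mtd_add_partition(master, "test", 0, SZ_1M);
 *	...
 *	mtd_del_partition(master, partno);
 */
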
int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_info *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &master->partitions, node)
		if (slave->index == partno) {
			ret = del_mtd_device(slave);
			if (ret < 0)
				break;

			list_del(&slave->node);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
#endif

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *slave;
	uint64_t cur_offset = 0;
	int i;

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add_tail(&slave->node, &master->partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(slave);

		cur_offset = slave->offset + slave->size;
	}

	return 0;
}

#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

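/*
 * A partition parser typically hooks itself up as follows (sketch only;
 * "my_parse_fn", "my_parser" and "myparser" are made-up names):
 *
 *	static struct mtd_part_parser my_parser = {
 *		.owner = THIS_MODULE,
 *		.parse_fn = my_parse_fn,
 *		.name = "myparser",
 *	};
 *
 *	register_mtd_parser(&my_parser);
 *
 * parse_mtd_partitions() below can then be pointed at it by listing
 * "myparser" in its @types argument.
 */
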
/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
#endif

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (mtd_is_partition(mtd))
		return mtd->parent->size;

	return mtd->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
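
/*
 * Putting the pieces together on the Linux side (sketch only; 'mtd' is an
 * assumed, already-initialised master device and error handling is omitted):
 * a driver would typically let parse_mtd_partitions() run the registered
 * parsers and hand any result to add_mtd_partitions():
 *
 *	struct mtd_partition *parts;
 *	int nr;
 *
 *	nr = parse_mtd_partitions(mtd, NULL, &parts, NULL);
 *	if (nr > 0)
 *		add_mtd_partitions(mtd, parts, nr);
 *	else
 *		add_mtd_device(mtd);
 */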