Lines Matching full:mtd
19 #include <linux/mtd/spi-nor.h>
21 #include <linux/mtd/nand.h>
38 struct mtd_info *mtd = NULL; in mtd_blk_map_table_init() local
47 mtd = desc->bdev->priv; in mtd_blk_map_table_init()
53 if (!mtd) { in mtd_blk_map_table_init()
56 blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift; in mtd_blk_map_table_init()
65 blk_begin = (u32)offset >> mtd->erasesize_shift; in mtd_blk_map_table_init()
66 blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \ in mtd_blk_map_table_init()
67 mtd->erasesize - 1) >> mtd->erasesize_shift); in mtd_blk_map_table_init()
84 if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) { in mtd_blk_map_table_init()
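
The lines above scan a range of erase blocks and record only the good ones. A minimal sketch of that idea against the U-Boot MTD API; the helper name, the flat u32 table and its indexing are illustrative assumptions, not taken from the file:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

/*
 * Sketch only: record, for each logical erase block in the range, the next
 * good physical erase block, skipping blocks that mtd_block_isbad() flags.
 */
static int build_blk_map(struct mtd_info *mtd, u32 blk_begin, u32 blk_cnt,
			 u32 *table, u32 table_len)
{
	u32 blk_total = (u32)((mtd->size + mtd->erasesize - 1) >>
			      mtd->erasesize_shift);
	u32 logical, physical = blk_begin;

	for (logical = blk_begin; logical < blk_begin + blk_cnt; logical++) {
		/* Advance past physical blocks marked bad */
		while (physical < blk_total &&
		       mtd_block_isbad(mtd, (loff_t)physical << mtd->erasesize_shift))
			physical++;
		if (physical >= blk_total || logical >= table_len)
			return -ENOSPC;	/* no good block left for this entry */
		table[logical] = physical++;
	}

	return 0;
}
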
98 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off) in get_mtd_blk_map_address() argument
102 size_t block_offset = offset & (mtd->erasesize - 1); in get_mtd_blk_map_address()
106 mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] == in get_mtd_blk_map_address()
108 mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] == in get_mtd_blk_map_address()
114 mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset); in get_mtd_blk_map_address()
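
The lookup side, under the same assumption of a table indexed by logical erase-block number (which is what the expressions above suggest): only the block number is swapped, the offset inside the erase block is preserved. Headers as in the first sketch; the helper name is illustrative.

/* Translate a logical byte offset through the (assumed) block map table. */
static loff_t map_address(struct mtd_info *mtd, const u32 *table, loff_t off)
{
	size_t block_offset = off & (mtd->erasesize - 1);	/* offset inside the block */
	u32 logical_blk = (u32)((u64)off >> mtd->erasesize_shift);

	return ((loff_t)table[logical_blk] << mtd->erasesize_shift) + block_offset;
}
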
138 pr_debug("mtd block map table fail\n"); in mtd_blk_map_partitions()
145 struct mtd_info *mtd = NULL; in mtd_blk_map_fit() local
153 mtd = dev_get_priv(desc->bdev->parent); in mtd_blk_map_fit()
157 mtd = desc->bdev->priv; in mtd_blk_map_fit()
165 if (mtd && totalsize) { in mtd_blk_map_fit()
166 if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize)) in mtd_blk_map_fit()
171 static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset, in mtd_map_read() argument
180 size_t block_offset = offset & (mtd->erasesize - 1); in mtd_map_read()
184 if (offset >= mtd->size) in mtd_map_read()
188 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) { in mtd_map_read()
189 if (mtd_block_isbad(mtd, mapped_offset & in mtd_map_read()
190 ~(mtd->erasesize - 1))) { in mtd_map_read()
192 offset & ~(mtd->erasesize - 1)); in mtd_map_read()
193 offset += mtd->erasesize - block_offset; in mtd_map_read()
198 if (left_to_read < (mtd->erasesize - block_offset)) in mtd_map_read()
201 read_length = mtd->erasesize - block_offset; in mtd_map_read()
203 rval = mtd_read(mtd, mapped_offset, read_length, &read_length, in mtd_map_read()
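
A condensed sketch of the fallback read loop visible above (the case where get_mtd_blk_map_address() finds no mapping): at most one erase block is read per iteration, and blocks reported bad are stepped over. Error codes assume the usual errno definitions; the helper name is illustrative.

/* Read `length` bytes starting at `offset`, skipping erase blocks marked bad. */
static int read_skipping_bad(struct mtd_info *mtd, loff_t offset,
			     size_t length, u_char *buffer)
{
	size_t left_to_read = length;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length, retlen;
		int rval;

		if (offset >= mtd->size)
			return -EINVAL;

		if (mtd_block_isbad(mtd, offset & ~(loff_t)(mtd->erasesize - 1))) {
			/* Bad block: jump to the start of the next erase block */
			offset += mtd->erasesize - block_offset;
			continue;
		}

		/* Never read across an erase-block boundary in one call */
		if (left_to_read < mtd->erasesize - block_offset)
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, offset, read_length, &retlen, buffer);
		if (rval && rval != -EUCLEAN)	/* -EUCLEAN only signals corrected bitflips */
			return rval;

		buffer += retlen;
		offset += retlen;
		left_to_read -= retlen;
	}

	return 0;
}
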
220 static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset, in mtd_map_write() argument
229 blocksize = mtd->erasesize; in mtd_map_write()
242 if ((offset & (mtd->writesize - 1)) != 0) { in mtd_map_write()
249 size_t block_offset = offset & (mtd->erasesize - 1); in mtd_map_write()
253 if (offset >= mtd->size) in mtd_map_write()
257 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) { in mtd_map_write()
258 if (mtd_block_isbad(mtd, mapped_offset & in mtd_map_write()
259 ~(mtd->erasesize - 1))) { in mtd_map_write()
261 offset & ~(mtd->erasesize - 1)); in mtd_map_write()
262 offset += mtd->erasesize - block_offset; in mtd_map_write()
267 if (!(mapped_offset & mtd->erasesize_mask)) { in mtd_map_write()
270 ei.len = mtd->erasesize; in mtd_map_write()
271 rval = mtd_erase(mtd, &ei); in mtd_map_write()
285 rval = mtd_write(mtd, mapped_offset, truncated_write_size, in mtd_map_write()
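
Writes pair every erase-block boundary with an erase before programming. A reduced sketch of one chunk, with error handling condensed; whether struct erase_info still carries an mtd member depends on the U-Boot version, so that assignment is an assumption.

/* Program one chunk at a mapped offset, erasing the block when we enter it. */
static int write_one_chunk(struct mtd_info *mtd, loff_t mapped_offset,
			   size_t len, const u_char *buf)
{
	struct erase_info ei = { 0 };
	size_t retlen;
	int rval;

	if (!(mapped_offset & mtd->erasesize_mask)) {
		/* Start of a fresh erase block: erase before the first write */
		ei.mtd = mtd;			/* assumed member; some trees omit it */
		ei.addr = mapped_offset;
		ei.len = mtd->erasesize;
		rval = mtd_erase(mtd, &ei);
		if (rval)
			return rval;
	}

	return mtd_write(mtd, mapped_offset, len, &retlen, buf);
}
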
304 static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset, in mtd_map_erase() argument
314 if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) { in mtd_map_erase()
325 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) { in mtd_map_erase()
326 if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) { in mtd_map_erase()
329 pos += mtd->erasesize; in mtd_map_erase()
336 ei.len = mtd->erasesize; in mtd_map_erase()
337 ret = mtd_erase(mtd, &ei); in mtd_map_erase()
344 pos += mtd->erasesize; in mtd_map_erase()
345 len -= mtd->erasesize; in mtd_map_erase()
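
Erase requests must be aligned to the erase-block size at both ends, and bad or reserved blocks are skipped rather than erased. A sketch of that walk; whether a skipped block counts against the requested length is a simplification here, and the helper name is illustrative.

/* Erase an aligned range, leaving bad and reserved blocks untouched. */
static int erase_range(struct mtd_info *mtd, loff_t pos, size_t len)
{
	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask))
		return -EINVAL;		/* start and length must be block aligned */

	while (len) {
		struct erase_info ei = { 0 };
		int ret;

		if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
			pos += mtd->erasesize;
			len -= mtd->erasesize;
			continue;
		}

		ei.mtd = mtd;
		ei.addr = pos;
		ei.len = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret)
			return ret;

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}
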
357 struct mtd_info *mtd; in mtd_part_parse() local
368 mtd = (struct mtd_info *)dev_desc->bdev->priv; in mtd_part_parse()
369 if (!mtd) in mtd_part_parse()
428 (mtd->erasesize >> 9) - 1) << 9, in mtd_part_parse()
459 struct mtd_info *mtd; in mtd_dread() local
465 mtd = desc->bdev->priv; in mtd_dread()
466 if (!mtd) in mtd_dread()
472 pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt); in mtd_dread()
475 ret = mtd_map_read(mtd, off, &rwsize, in mtd_dread()
476 NULL, mtd->size, in mtd_dread()
483 ret = mtd_map_read(mtd, off, &rwsize, in mtd_dread()
484 NULL, mtd->size, in mtd_dread()
492 struct spi_nor *nor = (struct spi_nor *)mtd->priv; in mtd_dread()
498 mtd_read(mtd, off, rwsize, &retlen_nor, dst); in mtd_dread()
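
The block-device entry points convert 512-byte sector numbers into MTD byte offsets; for SPI NOR the code above then calls mtd_read() directly, since NOR flash has no bad blocks to map around. A sketch of that conversion (helper name illustrative, <blk.h> assumed for lbaint_t):

#include <blk.h>

/* Read `blkcnt` 512-byte sectors from sector `start` straight via mtd_read(). */
static unsigned long nor_dread_sketch(struct mtd_info *mtd, lbaint_t start,
				      lbaint_t blkcnt, void *dst)
{
	loff_t off = (loff_t)start << 9;	/* sector number -> byte offset */
	size_t rwsize = (size_t)blkcnt << 9;	/* sector count  -> byte count */
	size_t retlen_nor = 0;

	mtd_read(mtd, off, rwsize, &retlen_nor, dst);

	return retlen_nor >> 9;			/* sectors actually read */
}
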
521 struct mtd_info *mtd; in mtd_dwrite() local
527 mtd = desc->bdev->priv; in mtd_dwrite()
528 if (!mtd) in mtd_dwrite()
531 pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt); in mtd_dwrite()
540 ret = mtd_map_write(mtd, off, &rwsize, in mtd_dwrite()
541 NULL, mtd->size, in mtd_dwrite()
552 alinged = off & mtd->erasesize_mask; in mtd_dwrite()
555 rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) & in mtd_dwrite()
556 ~(mtd->erasesize - 1); in mtd_dwrite()
564 ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned, in mtd_dwrite()
565 NULL, mtd->size, in mtd_dwrite()
574 ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned, in mtd_dwrite()
575 NULL, mtd->size, in mtd_dwrite()
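
For writes that do not start or end on an erase-block boundary, the code above widens the window to whole erase blocks, reads the old contents, patches in the new sectors, and pushes the window back through the erase-aware write path. The alignment arithmetic, sketched with illustrative names:

/* Widen an arbitrary byte window to whole erase blocks. */
static void align_to_erase_blocks(struct mtd_info *mtd, loff_t off, size_t rwsize,
				  loff_t *off_aligned, size_t *rwsize_aligned)
{
	size_t in_block = off & mtd->erasesize_mask;	/* bytes into the erase block */

	*off_aligned = off - in_block;
	*rwsize_aligned = (in_block + rwsize + mtd->erasesize - 1) &
			  ~((size_t)mtd->erasesize - 1);
}

The widened window is then filled by the map-aware read, the new data copied in at the in-block offset, and the result handed to mtd_map_write(), which erases each block before programming it.
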
598 struct mtd_info *mtd; in mtd_derase() local
604 mtd = desc->bdev->priv; in mtd_derase()
605 if (!mtd) in mtd_derase()
608 pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt); in mtd_derase()
616 ret = mtd_map_erase(mtd, off, len); in mtd_derase()
629 struct mtd_info *mtd; in mtd_blk_probe() local
633 mtd = dev_get_uclass_priv(udev->parent); in mtd_blk_probe()
634 if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) { in mtd_blk_probe()
636 mtd = dev_get_priv(udev->parent); in mtd_blk_probe()
640 /* Fill mtd devices information */ in mtd_blk_probe()
641 if (is_power_of_2(mtd->erasesize)) in mtd_blk_probe()
642 mtd->erasesize_shift = ffs(mtd->erasesize) - 1; in mtd_blk_probe()
644 mtd->erasesize_shift = 0; in mtd_blk_probe()
646 if (is_power_of_2(mtd->writesize)) in mtd_blk_probe()
647 mtd->writesize_shift = ffs(mtd->writesize) - 1; in mtd_blk_probe()
649 mtd->writesize_shift = 0; in mtd_blk_probe()
651 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; in mtd_blk_probe()
652 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; in mtd_blk_probe()
654 desc->bdev->priv = mtd; in mtd_blk_probe()
656 if (strncmp(mtd->name, "nand", 4) == 0) in mtd_blk_probe()
659 memcpy(desc->product, mtd->name, strlen(mtd->name)); in mtd_blk_probe()
661 if (mtd->type == MTD_NANDFLASH) { in mtd_blk_probe()
673 for (; i < (mtd->size / mtd->erasesize); i++) { in mtd_blk_probe()
674 ret = mtd_block_isbad(mtd, in mtd_blk_probe()
675 mtd->size - mtd->erasesize * (i + 1)); in mtd_blk_probe()
677 desc->lba = (mtd->size >> 9) - in mtd_blk_probe()
678 (mtd->erasesize >> 9) * i; in mtd_blk_probe()
683 desc->lba = mtd->size >> 9; in mtd_blk_probe()
686 debug("MTD: desc->lba is %lx\n", desc->lba); in mtd_blk_probe()