1 /*
2 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3 *
4 * SPDX-License-Identifier: GPL-2.0+
5 */
6
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <image.h>
13 #include <linux/log2.h>
14 #include <malloc.h>
15 #include <nand.h>
16 #include <part.h>
17 #include <spi.h>
18 #include <dm/device-internal.h>
19 #include <linux/mtd/spinand.h>
20 #include <linux/mtd/spi-nor.h>
21 #ifdef CONFIG_NAND
22 #include <linux/mtd/nand.h>
23 #endif
24
// #define MTD_BLK_VERBOSE

/* Prefix and buffer sizes for the generated "mtdparts=" bootargs string. */
#define MTD_PART_NAND_HEAD "mtdparts="
#define MTD_PART_INFO_MAX_SIZE 512
#define MTD_SINGLE_PART_INFO_MAX_SIZE 40

/* Remap-table sentinels: erase block not scanned yet / shifted past window. */
#define MTD_BLK_TABLE_BLOCK_UNKNOWN (-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT (-1)

/* LBA marker used for a last partition whose end is not yet known. */
#define FACTORY_UNKNOWN_LBA (0xffffffff - 34)

/* Logical->physical erase-block remap table; one int entry per erase block. */
static int *mtd_map_blk_table;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
/*
 * usbplug streams partitions as sequential chunked transfers; these track
 * the expected next offset and how many bytes were skipped for bad blocks,
 * so a following chunk resumes at the shifted physical position.
 */
static loff_t usbplug_dummy_partition_write_last_addr;
static loff_t usbplug_dummy_partition_write_seek;
static loff_t usbplug_dummy_partition_read_last_addr;
static loff_t usbplug_dummy_partition_read_seek;
#endif
44
/*
 * Build (or extend) the global logical->physical erase-block remap table
 * for the byte region [offset, offset + length) of the MTD device behind
 * @desc.  Each logical block index is mapped to the next good physical
 * block, skipping bad blocks, so later accesses can treat the region as
 * bad-block free.
 *
 * Returns 0 on success (including when the region was already mapped),
 * -ENODEV when @desc is NULL or not backed by (SPI-)NAND, -ENOMEM when
 * the table cannot be allocated, -EINVAL when @offset is out of range.
 */
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	/* Only raw NAND and SPI-NAND need bad-block remapping. */
	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		/* Total erase blocks on the device, rounding up. */
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			/* Lazily allocate one entry per erase block. */
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		/* Convert the byte range into an erase-block window. */
		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \
			   mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		/* First block already scanned: keep the existing mapping. */
		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		/* should not across blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			/*
			 * All good blocks in the window are consumed; mark
			 * the remaining logical slots as shifted out.
			 */
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					/* Logical block i -> physical block j. */
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					/*
					 * NOTE(review): the extra bump when j hits
					 * blk_cnt looks redundant (j >= blk_cnt is
					 * already true) — presumably defensive;
					 * confirm before simplifying.
					 */
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}
109
/*
 * Translate @*off through the bad-block remap table.  When a valid
 * mapping exists, the translated physical offset (same offset within
 * the mapped erase block) is written back to @*off and true is
 * returned; otherwise @*off is left untouched and false is returned.
 */
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);
	int entry;

	if (!mtd_map_blk_table)
		return false;

	entry = mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift];
	/* Unknown (-2) and shifted-out (0xffffffff, i.e. -1) have no mapping. */
	if (entry == MTD_BLK_TABLE_BLOCK_UNKNOWN || entry == 0xffffffff)
		return false;

	*off = (loff_t)(((u32)entry << mtd->erasesize_shift) + block_offset);

	return true;
}
130
mtd_blk_map_partitions(struct blk_desc * desc)131 void mtd_blk_map_partitions(struct blk_desc *desc)
132 {
133 disk_partition_t info;
134 int i, ret;
135
136 if (!desc)
137 return;
138
139 if (desc->if_type != IF_TYPE_MTD)
140 return;
141
142 for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
143 ret = part_get_info(desc, i, &info);
144 if (ret != 0)
145 break;
146
147 if (mtd_blk_map_table_init(desc,
148 info.start << 9,
149 info.size << 9)) {
150 pr_debug("mtd block map table fail\n");
151 }
152 }
153 }
154
/*
 * Map the flash region occupied by a FIT image into the bad-block remap
 * table.  @sector is the image start in 512-byte units; the mapped span
 * is the FIT /totalsize plus one extra erase block to cover an image
 * that straddles a block boundary.  No-op when the device is not MTD,
 * no MTD is attached, or the FIT has no /totalsize node.
 */
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	/*
	 * Guard against a missing descriptor before dereferencing it —
	 * consistent with mtd_blk_map_partitions()/mtd_blk_map_table_init().
	 */
	if (!desc || desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Can not find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9,
					   totalsize + (size_t)mtd->erasesize))
			debug("Map block table fail.\n");
	}
}
182
/*
 * Read @*length bytes from @offset, transparently handling bad blocks:
 * offsets covered by the remap table are translated, otherwise bad
 * blocks are skipped on the fly.  @actual and @lim are accepted for
 * API compatibility but unused.
 *
 * On failure *length is reduced by the bytes that could not be read.
 * Returns 0 on success (also when the read ran off the device end) or
 * the negative mtd_read() error.
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
	/*
	 * Sequential usbplug chunks continue where the previous one
	 * ended; replay the accumulated bad-block skip so the physical
	 * position stays aligned with the logical stream.
	 */
	if (usbplug_dummy_partition_read_last_addr != offset)
		usbplug_dummy_partition_read_seek = 0;
	usbplug_dummy_partition_read_last_addr = offset + left_to_read;
	offset += usbplug_dummy_partition_read_seek;
#endif

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Ran past the end of the device: silently stop. */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* Not remapped: skip bad blocks manually. */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08x in read\n",
				       (u32)(offset & ~(mtd->erasesize - 1)));
				offset += mtd->erasesize - block_offset;
#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
				usbplug_dummy_partition_read_seek += mtd->erasesize;
#endif
				continue;
			}
		}

		/* Read at most to the end of the current erase block. */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN (corrected bitflips) still delivers valid data. */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %x failed %d\n",
			       (u32)offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}
241
mtd_map_write(struct mtd_info * mtd,loff_t offset,size_t * length,size_t * actual,loff_t lim,u_char * buffer,int flags)242 static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
243 size_t *length, size_t *actual,
244 loff_t lim, u_char *buffer, int flags)
245 {
246 int rval = 0, blocksize;
247 size_t left_to_write = *length;
248 u_char *p_buffer = buffer;
249 struct erase_info ei;
250
251 blocksize = mtd->erasesize;
252
253 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
254 if (usbplug_dummy_partition_write_last_addr != offset)
255 usbplug_dummy_partition_write_seek = 0;
256 usbplug_dummy_partition_write_last_addr = offset + left_to_write;
257 offset += usbplug_dummy_partition_write_seek;
258 #endif
259
260 /*
261 * nand_write() handles unaligned, partial page writes.
262 *
263 * We allow length to be unaligned, for convenience in
264 * using the $filesize variable.
265 *
266 * However, starting at an unaligned offset makes the
267 * semantics of bad block skipping ambiguous (really,
268 * you should only start a block skipping access at a
269 * partition boundary). So don't try to handle that.
270 */
271 if ((offset & (mtd->writesize - 1)) != 0) {
272 printf("Attempt to write non page-aligned data\n");
273 *length = 0;
274 return -EINVAL;
275 }
276
277 while (left_to_write > 0) {
278 size_t block_offset = offset & (mtd->erasesize - 1);
279 size_t write_size, truncated_write_size;
280 loff_t mapped_offset;
281
282 if (offset >= mtd->size)
283 return 0;
284
285 mapped_offset = offset;
286 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
287 if (mtd_block_isbad(mtd, mapped_offset &
288 ~(mtd->erasesize - 1))) {
289 printf("Skipping bad block 0x%08x in write\n",
290 (u32)(offset & ~(mtd->erasesize - 1)));
291 offset += mtd->erasesize - block_offset;
292 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
293 usbplug_dummy_partition_write_seek += mtd->erasesize;
294 #endif
295 continue;
296 }
297 }
298
299 if (!(mapped_offset & mtd->erasesize_mask)) {
300 memset(&ei, 0, sizeof(struct erase_info));
301 ei.addr = mapped_offset;
302 ei.len = mtd->erasesize;
303 rval = mtd_erase(mtd, &ei);
304 if (rval) {
305 pr_info("error %d while erasing %llx\n", rval,
306 mapped_offset);
307 return rval;
308 }
309 }
310
311 if (left_to_write < (blocksize - block_offset))
312 write_size = left_to_write;
313 else
314 write_size = blocksize - block_offset;
315
316 truncated_write_size = write_size;
317 rval = mtd_write(mtd, mapped_offset, truncated_write_size,
318 (size_t *)(&truncated_write_size), p_buffer);
319
320 offset += write_size;
321 p_buffer += write_size;
322
323 if (rval != 0) {
324 printf("NAND write to offset %llx failed %d\n",
325 offset, rval);
326 *length -= left_to_write;
327 return rval;
328 }
329
330 left_to_write -= write_size;
331 }
332
333 return 0;
334 }
335
mtd_map_erase(struct mtd_info * mtd,loff_t offset,size_t length)336 static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
337 size_t length)
338 {
339 struct erase_info ei;
340 loff_t pos, len;
341 int ret;
342
343 pos = offset;
344 len = length;
345
346 if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
347 pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n",
348 pos, len);
349
350 return -EINVAL;
351 }
352
353 while (len) {
354 loff_t mapped_offset;
355
356 mapped_offset = pos;
357 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
358 if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
359 pr_debug("attempt to erase a bad/reserved block @%llx\n",
360 pos);
361 pos += mtd->erasesize;
362 continue;
363 }
364 }
365
366 memset(&ei, 0, sizeof(struct erase_info));
367 ei.addr = mapped_offset;
368 ei.len = mtd->erasesize;
369 ret = mtd_erase(mtd, &ei);
370 if (ret) {
371 pr_err("map_erase error %d while erasing %llx\n", ret,
372 pos);
373 return ret;
374 }
375
376 pos += mtd->erasesize;
377 len -= mtd->erasesize;
378 }
379
380 return 0;
381 }
382
/*
 * Build a kernel-style "mtdparts=<product>:<size>@<off>(<name>),..."
 * string describing the GPT partitions of the boot MTD device (sizes
 * and offsets in bytes; partition table entries are 512-byte sectors).
 *
 * The last partition is shrunk by up to one erase block so user-space
 * writes cannot clobber the backup GPT at the end of the device.
 *
 * Returns a heap-allocated string the caller must free(), or NULL on
 * error.  In U-Boot proper the passed @dev_desc is ignored and the
 * actual boot device is used instead.
 */
char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* Emit the "mtdparts=<product>:" prefix and advance the cursor. */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/* Write the entry at the cursor, keep a copy for length accounting. */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		/*
		 * Peek at partition p+1: when it does not exist, p is the
		 * last one and may need its size trimmed.  On failure @info
		 * still holds partition p's data, which the branches below use.
		 */
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* Partition with grow tag in parameter will be resized */
			if ((info.size + info.start + 64) >= dev_desc->lba ||
			    (info.size + info.start - 1) == FACTORY_UNKNOWN_LBA) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* Nor is 64KB erase block(kernel) and gpt table just
					 * resserve 33 sectors for the last partition. This
					 * will erase the backup gpt table by user program,
					 * so reserve one block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* Nand flash is erased by block and gpt table just
					 * resserve 33 sectors for the last partition. This
					 * will erase the backup gpt table by user program,
					 * so reserve one block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		/* Advance past this entry plus the separating comma. */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	/* Drop the trailing comma left after the final entry. */
	length = strlen(mtd_part_info);
	if (length > 0 && mtd_part_info[length - 1] == ',')
		mtd_part_info[length - 1] = '\0';

	return mtd_part_info;
}
487
mtd_dread(struct udevice * udev,lbaint_t start,lbaint_t blkcnt,void * dst)488 ulong mtd_dread(struct udevice *udev, lbaint_t start,
489 lbaint_t blkcnt, void *dst)
490 {
491 struct blk_desc *desc = dev_get_uclass_platdata(udev);
492 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
493 loff_t off = (loff_t)(start * 512);
494 size_t rwsize = blkcnt * 512;
495 #endif
496 struct mtd_info *mtd;
497 int ret = 0;
498 #ifdef MTD_BLK_VERBOSE
499 ulong us = 1;
500 #endif
501
502 if (!desc)
503 return ret;
504
505 mtd = desc->bdev->priv;
506 if (!mtd)
507 return 0;
508
509 if (blkcnt == 0)
510 return 0;
511
512 #ifdef MTD_BLK_VERBOSE
513 us = get_ticks();
514 #endif
515 if (desc->devnum == BLK_MTD_NAND) {
516 ret = mtd_map_read(mtd, off, &rwsize,
517 NULL, mtd->size,
518 (u_char *)(dst));
519 if (!ret)
520 ret = blkcnt;
521 } else if (desc->devnum == BLK_MTD_SPI_NAND) {
522 #if defined(CONFIG_MTD_SPI_NAND)
523 struct spinand_device *spinand = mtd_to_spinand(mtd);
524 struct spi_slave *spi = spinand->slave;
525 size_t retlen_nand;
526
527 if (desc->op_flag == BLK_PRE_RW) {
528 spi->mode |= SPI_DMA_PREPARE;
529 ret = mtd_read(mtd, off, rwsize,
530 &retlen_nand, (u_char *)(dst));
531 spi->mode &= ~SPI_DMA_PREPARE;
532 if (retlen_nand == rwsize)
533 ret = blkcnt;
534 } else {
535 if (spinand->support_cont_read)
536 ret = mtd_read(mtd, off, rwsize,
537 &retlen_nand,
538 (u_char *)(dst));
539 else
540 ret = mtd_map_read(mtd, off, &rwsize,
541 NULL, mtd->size,
542 (u_char *)(dst));
543 if (!ret)
544 ret = blkcnt;
545 }
546 #endif
547 } else if (desc->devnum == BLK_MTD_SPI_NOR) {
548 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
549 struct spi_nor *nor = (struct spi_nor *)mtd->priv;
550 struct spi_slave *spi = nor->spi;
551 size_t retlen_nor;
552
553 if (desc->op_flag == BLK_PRE_RW)
554 spi->mode |= SPI_DMA_PREPARE;
555 ret = mtd_read(mtd, off, rwsize, &retlen_nor, dst);
556 if (desc->op_flag == BLK_PRE_RW)
557 spi->mode &= ~SPI_DMA_PREPARE;
558
559 if (retlen_nor == rwsize)
560 ret = blkcnt;
561 #endif
562 }
563 #ifdef MTD_BLK_VERBOSE
564 us = (get_ticks() - us) / (gd->arch.timer_rate_hz / 1000000);
565 pr_err("mtd dread %s %lx %lx cost %ldus: %ldMB/s\n\n", mtd->name, start, blkcnt, us, (blkcnt / 2) / ((us + 999) / 1000));
566 #else
567 pr_debug("mtd dread %s %lx %lx\n\n", mtd->name, start, blkcnt);
568 #endif
569
570 return ret;
571 }
572
573 #if CONFIG_IS_ENABLED(MTD_WRITE)
mtd_dwrite(struct udevice * udev,lbaint_t start,lbaint_t blkcnt,const void * src)574 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
575 lbaint_t blkcnt, const void *src)
576 {
577 struct blk_desc *desc = dev_get_uclass_platdata(udev);
578 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
579 loff_t off = (loff_t)(start * 512);
580 size_t rwsize = blkcnt * 512;
581 #endif
582 struct mtd_info *mtd;
583 int ret = 0;
584
585 if (!desc)
586 return ret;
587
588 mtd = desc->bdev->priv;
589 if (!mtd)
590 return 0;
591
592 pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
593
594 if (blkcnt == 0)
595 return 0;
596
597 if (desc->op_flag & BLK_MTD_CONT_WRITE &&
598 (start == 1 || ((desc->lba - start) <= 33))) {
599 printf("Write in GPT area, lba=%ld cnt=%ld\n", start, blkcnt);
600 desc->op_flag &= ~BLK_MTD_CONT_WRITE;
601 }
602
603 if (desc->devnum == BLK_MTD_NAND ||
604 desc->devnum == BLK_MTD_SPI_NAND ||
605 desc->devnum == BLK_MTD_SPI_NOR) {
606 if (desc->op_flag & BLK_MTD_CONT_WRITE) {
607 ret = mtd_map_write(mtd, off, &rwsize,
608 NULL, mtd->size,
609 (u_char *)(src), 0);
610 if (!ret)
611 return blkcnt;
612 else
613 return 0;
614 } else {
615 lbaint_t off_aligned, alinged;
616 size_t rwsize_aligned;
617 u8 *p_buf;
618
619 alinged = off & mtd->erasesize_mask;
620 off_aligned = off - alinged;
621 rwsize_aligned = rwsize + alinged;
622 rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
623 ~(mtd->erasesize - 1);
624
625 p_buf = malloc(rwsize_aligned);
626 if (!p_buf) {
627 printf("%s: Fail to malloc!", __func__);
628 return 0;
629 }
630
631 ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
632 NULL, mtd->size,
633 (u_char *)(p_buf));
634 if (ret) {
635 free(p_buf);
636 return 0;
637 }
638
639 memcpy(p_buf + alinged, src, rwsize);
640
641 ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
642 NULL, mtd->size,
643 (u_char *)(p_buf), 0);
644 free(p_buf);
645 if (!ret)
646 return blkcnt;
647 else
648 return 0;
649 }
650 } else {
651 return 0;
652 }
653
654 return 0;
655 }
656
mtd_derase(struct udevice * udev,lbaint_t start,lbaint_t blkcnt)657 ulong mtd_derase(struct udevice *udev, lbaint_t start,
658 lbaint_t blkcnt)
659 {
660 struct blk_desc *desc = dev_get_uclass_platdata(udev);
661 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
662 loff_t off = (loff_t)(start * 512);
663 size_t len = blkcnt * 512;
664 #endif
665 struct mtd_info *mtd;
666 int ret = 0;
667
668 if (!desc)
669 return ret;
670
671 mtd = desc->bdev->priv;
672 if (!mtd)
673 return 0;
674
675 pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);
676 len = round_up(len, mtd->erasesize);
677
678 if (blkcnt == 0)
679 return 0;
680
681 if (desc->devnum == BLK_MTD_NAND ||
682 desc->devnum == BLK_MTD_SPI_NAND ||
683 desc->devnum == BLK_MTD_SPI_NOR) {
684 ret = mtd_map_erase(mtd, off, len);
685 if (ret)
686 return ret;
687 } else {
688 return 0;
689 }
690
691 return blkcnt;
692 }
693 #endif
694
mtd_blk_probe(struct udevice * udev)695 static int mtd_blk_probe(struct udevice *udev)
696 {
697 struct mtd_info *mtd;
698 struct blk_desc *desc = dev_get_uclass_platdata(udev);
699 int ret, i = 0;
700
701 mtd = dev_get_uclass_priv(udev->parent);
702 if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
703 #ifndef CONFIG_SPL_BUILD
704 mtd = dev_get_priv(udev->parent);
705 #endif
706 }
707
708 /* Fill mtd devices information */
709 if (is_power_of_2(mtd->erasesize))
710 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
711 else
712 mtd->erasesize_shift = 0;
713
714 if (is_power_of_2(mtd->writesize))
715 mtd->writesize_shift = ffs(mtd->writesize) - 1;
716 else
717 mtd->writesize_shift = 0;
718
719 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
720 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
721
722 desc->bdev->priv = mtd;
723 sprintf(desc->vendor, "0x%.4x", 0x2207);
724 if (strncmp(mtd->name, "nand", 4) == 0)
725 memcpy(desc->product, "rk-nand", strlen("rk-nand"));
726 else
727 memcpy(desc->product, mtd->name, strlen(mtd->name));
728 memcpy(desc->revision, "V1.00", sizeof("V1.00"));
729 if (mtd->type == MTD_NANDFLASH) {
730 #ifdef CONFIG_NAND
731 if (desc->devnum == BLK_MTD_NAND)
732 i = NAND_BBT_SCAN_MAXBLOCKS;
733 #endif
734 #ifdef CONFIG_MTD_SPI_NAND
735 if (desc->devnum == BLK_MTD_SPI_NAND)
736 i = NANDDEV_BBT_SCAN_MAXBLOCKS;
737 #endif
738
739 /*
740 * Find the first useful block in the end,
741 * and it is the end lba of the nand storage.
742 */
743 for (; i < (mtd->size / mtd->erasesize); i++) {
744 ret = mtd_block_isbad(mtd,
745 mtd->size - mtd->erasesize * (i + 1));
746 if (!ret) {
747 desc->lba = (mtd->size >> 9) -
748 (mtd->erasesize >> 9) * i;
749 desc->rawlba = desc->lba;
750 break;
751 }
752 }
753 } else {
754 desc->lba = mtd->size >> 9;
755 }
756
757 debug("MTD: desc->lba is %lx\n", desc->lba);
758
759 return 0;
760 }
761
/* Block-layer operations backing MTD block devices. */
static const struct blk_ops mtd_blk_ops = {
	.read = mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write = mtd_dwrite,
	.erase = mtd_derase,
#endif
};
769
/* Register the MTD block driver with the driver-model BLK uclass. */
U_BOOT_DRIVER(mtd_blk) = {
	.name = "mtd_blk",
	.id = UCLASS_BLK,
	.ops = &mtd_blk_ops,
	.probe = mtd_blk_probe,
};
776