xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision 2f6c020d95ebda22b28d3a31f574ec547a9281fb)
/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <linux/log2.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spinand.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

// #define MTD_BLK_VERBOSE

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)	/* entry not mapped yet */
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)	/* no good block left for this entry */

static int *mtd_map_blk_table;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
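/*
 * Track where the previous usbplug transfer ended and how far bad-block
 * skipping has pushed the physical position, so chunked sequential
 * accesses to a dummy partition resume with the same skip offset.
 */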
static loff_t usbplug_dummy_partition_write_last_addr;
static loff_t usbplug_dummy_partition_write_seek;
static loff_t usbplug_dummy_partition_read_last_addr;
static loff_t usbplug_dummy_partition_read_seek;
#endif

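/*
 * mtd_blk_map_table_init() - build the logical-to-physical erase block
 * map for the region [offset, offset + length) of a NAND/SPI-NAND device.
 *
 * The table is allocated lazily for the whole device and every entry
 * starts as MTD_BLK_TABLE_BLOCK_UNKNOWN. Each logical block in the region
 * is then assigned the next good physical block, skipping blocks that
 * mtd_block_isbad() reports bad; logical blocks left without a good
 * physical block get MTD_BLK_TABLE_BLOCK_SHIFT.
 */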
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		/* the search index j must never run past blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (loff_t)(blk_begin + j) << mtd->erasesize_shift)) {
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}

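/*
 * get_mtd_blk_map_address() - translate a logical offset into the mapped
 * physical offset recorded in mtd_map_blk_table.
 *
 * Returns true and rewrites *off when a valid mapping exists. Returns
 * false, leaving *off untouched, when the table is missing or the entry
 * is unknown/invalid, in which case the caller must do its own
 * bad-block skipping.
 */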
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

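/*
 * mtd_blk_map_partitions() - pre-build the bad block map for every
 * partition of an IF_TYPE_MTD block device.
 */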
void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			break;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}

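/*
 * mtd_blk_map_fit() - pre-build the bad block map for a FIT image that
 * starts at @sector, covering its /totalsize plus one spare erase block.
 */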
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}

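/*
 * mtd_map_read() - read *length bytes at offset, going through the block
 * map when one exists and skipping bad blocks erase block by erase block
 * otherwise. -EUCLEAN (correctable bitflips) is not treated as an error.
 */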
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
	if (usbplug_dummy_partition_read_last_addr != offset)
		usbplug_dummy_partition_read_seek = 0;
	usbplug_dummy_partition_read_last_addr = offset + left_to_read;
	offset += usbplug_dummy_partition_read_seek;
#endif

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08x in read\n",
				       (u32)(offset & ~(mtd->erasesize - 1)));
				offset += mtd->erasesize - block_offset;
#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
				usbplug_dummy_partition_read_seek += mtd->erasesize;
#endif
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %x failed %d\n",
			       (u32)offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}

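/*
 * mtd_map_write() - erase-then-write *length bytes at offset with the
 * same block-map/bad-block handling as mtd_map_read(). The offset must
 * be page aligned; each erase block is erased when the write first
 * touches it.
 */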
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
	if (usbplug_dummy_partition_write_last_addr != offset)
		usbplug_dummy_partition_write_seek = 0;
	usbplug_dummy_partition_write_last_addr = offset + left_to_write;
	offset += usbplug_dummy_partition_write_seek;
#endif

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08x in write\n",
				       (u32)(offset & ~(mtd->erasesize - 1)));
				offset += mtd->erasesize - block_offset;
#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
				usbplug_dummy_partition_write_seek += mtd->erasesize;
#endif
				continue;
			}
		}

		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

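/*
 * mtd_map_erase() - erase the block-aligned region [offset, offset + length),
 * resolving each block through the map when present and otherwise
 * skipping bad or reserved blocks in place.
 */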
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos=%llx, len=%llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}

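/*
 * mtd_part_parse() - build a kernel "mtdparts=" command line string from
 * the GPT partition table, for example (partition names and sizes are
 * illustrative only):
 *
 *   mtdparts=rk-nand:0x200000@0x4000(boot),0x400000@0x204000(rootfs)
 *
 * Sizes and offsets are in bytes (512-byte sectors shifted left by 9).
 * The last partition is shrunk by one erase block so a full-length write
 * cannot clobber the backup GPT. The caller must free the returned buffer.
 */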
char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* A partition with the grow tag in the parameter file is resized */
			if ((info.size + info.start + 64) >= dev_desc->lba) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* The kernel erases NOR in 64KB blocks,
					 * but the GPT reserves only 33 sectors
					 * for the backup table after the last
					 * partition, which a user program could
					 * erase, so reserve one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* NAND flash is erased by block, but the
					 * GPT reserves only 33 sectors for the
					 * backup table after the last partition,
					 * which a user program could erase, so
					 * reserve one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

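/*
 * mtd_dread() - UCLASS_BLK read op. @start and @blkcnt are in 512-byte
 * sectors; returns @blkcnt on success. SPI NAND with continuous-read
 * support and SPI NOR use a plain mtd_read() instead of the
 * bad-block-mapped path.
 */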
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;
#ifdef MTD_BLK_VERBOSE
	ulong us = 1;
#endif

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

#ifdef MTD_BLK_VERBOSE
	us = get_ticks();
#endif
	if (desc->devnum == BLK_MTD_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			ret = blkcnt;
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		struct spinand_device *spinand = mtd_to_spinand(mtd);
		struct spi_slave *spi = spinand->slave;
		size_t retlen_nand;

		if (desc->op_flag == BLK_PRE_RW) {
			spi->mode |= SPI_DMA_PREPARE;
			ret = mtd_read(mtd, off, rwsize,
				       &retlen_nand, (u_char *)(dst));
			spi->mode &= ~SPI_DMA_PREPARE;
			if (retlen_nand == rwsize)
				ret = blkcnt;
		} else {
			if (spinand->support_cont_read)
				ret = mtd_read(mtd, off, rwsize,
					       &retlen_nand,
					       (u_char *)(dst));
			else
				ret = mtd_map_read(mtd, off, &rwsize,
						   NULL, mtd->size,
						   (u_char *)(dst));
			if (!ret)
				ret = blkcnt;
		}
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		ret = mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			ret = blkcnt;
#endif
	}
#ifdef MTD_BLK_VERBOSE
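	/* Convert ticks to microseconds, assuming a 24 MHz timebase. */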
	us = (get_ticks() - us) / 24UL;
	pr_err("mtd dread %s %lx %lx cost %ldus: %ldMB/s\n\n",
	       mtd->name, start, blkcnt, us, (blkcnt / 2) / ((us + 999) / 1000));
#else
	pr_debug("mtd dread %s %lx %lx\n\n", mtd->name, start, blkcnt);
#endif

	return ret;
}

#if CONFIG_IS_ENABLED(MTD_WRITE)
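/*
 * mtd_dwrite() - UCLASS_BLK write op. With BLK_MTD_CONT_WRITE set the
 * data is streamed straight through mtd_map_write(); otherwise the
 * affected erase blocks are read back, patched with the new sectors and
 * rewritten, so unaligned writes do not destroy neighbouring data.
 */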
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->op_flag & BLK_MTD_CONT_WRITE &&
	    (start == 1 || ((desc->lba - start) <= 33))) {
		printf("Write in GPT area, lba=%ld cnt=%ld\n", start, blkcnt);
		desc->op_flag &= ~BLK_MTD_CONT_WRITE;
	}

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag & BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}

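/*
 * mtd_derase() - UCLASS_BLK erase op. Expects @start/@blkcnt to describe
 * an erase-block-aligned range; returns @blkcnt on success.
 */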
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return blkcnt;
}
#endif

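/*
 * mtd_blk_probe() - fill in the blk_desc of an MTD backed block device:
 * cache the erase/write size shifts and masks, set the vendor, product
 * and revision strings, and derive the usable LBA count by trimming
 * trailing bad blocks (and the BBT area on NAND) from the raw capacity.
 */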
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	/* Fill mtd devices information */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the last good block at the end of the device;
		 * it marks the end LBA of the usable NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
765