/* xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision ed2791773ea1539f48aa90110f5d207006db818a) */
/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <linux/log2.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

// #define MTD_BLK_VERBOSE

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

/*
 * Logical-to-physical erase-block remap table. Each entry holds the index
 * of the physical erase block backing that logical block, or one of the
 * MTD_BLK_TABLE_BLOCK_* markers above.
 */
static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
		mtd->erasesize - 1) >> mtd->erasesize_shift);
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* The region is already mapped */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The remap must not reach beyond blk_cnt */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

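/*
 * Worked example (illustrative only, not part of the driver): with a
 * 128 KiB erase block and physical block 1 within the region marked bad,
 * initializing the table for a region spanning blocks 0..3 yields:
 *
 *   logical block:  0  1  2  3
 *   table entry:    0  2  3  MTD_BLK_TABLE_BLOCK_SHIFT
 *
 * Logical blocks slide past bad physical blocks; logical blocks that no
 * longer fit inside the region are marked MTD_BLK_TABLE_BLOCK_SHIFT.
 */
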
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	/* Unknown or shifted-out blocks cannot be remapped */
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_SHIFT)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

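/*
 * Example (illustrative only): with a 128 KiB (0x20000) erase block and
 * mtd_map_blk_table[1] == 2, a request for offset 0x25000 (block 1,
 * in-block offset 0x5000) is translated to 2 * 0x20000 + 0x5000 = 0x45000.
 */
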
void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			break;

		/* Partition table entries are in 512-byte sectors */
		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}

void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		/* Reserve one extra erase block for bad-block skipping */
		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap table: fall back to skipping bad blocks */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %x failed %d\n",
			       (u32)offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}

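#if 0
/*
 * Minimal usage sketch (not compiled into the driver): read 1 MiB from
 * byte offset 0, with bad blocks remapped via the block table when it is
 * present, or skipped otherwise. The caller and buffer are assumptions
 * made for the example.
 */
static int example_read_first_mib(struct mtd_info *mtd, u_char *buf)
{
	size_t len = 0x100000;	/* in: bytes requested; reduced on error */

	return mtd_map_read(mtd, 0, &len, NULL, mtd->size, buf);
}
#endif
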
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap table: fall back to skipping bad blocks */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase each block before the first write into it */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 &truncated_write_size, p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

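/*
 * Note (explanatory, not from the original source): mtd_map_write() only
 * erases a block when the mapped offset lands exactly on an erase-block
 * boundary, so a write that starts mid-block goes into a block erased by
 * an earlier call, or not erased at all. The buffered read-modify-write
 * path in mtd_dwrite() below exists to turn arbitrary sector-aligned
 * requests into erase-block-aligned calls that satisfy this constraint.
 */
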
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos=%llx, len=%llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}

char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* A partition with the grow tag in the parameter will be resized */
			if ((info.size + info.start + 64) >= dev_desc->lba) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* NOR is erased in 64 KiB blocks (by the
					 * kernel) and the GPT reserves just 33
					 * sectors after the last partition, so a
					 * user program erasing the partition
					 * would wipe the backup GPT; hold back
					 * one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* NAND flash is erased by block and the
					 * GPT reserves just 33 sectors after the
					 * last partition, so a user program
					 * erasing the partition would wipe the
					 * backup GPT; hold back one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

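/*
 * Example output (illustrative; the real names and sizes come from the GPT
 * and the device's product string):
 *
 *   mtdparts=spi-nand0:0x100000@0x0(uboot),0x400000@0x100000(boot),0x7a00000@0x500000(rootfs)
 *
 * Each entry is <size>@<offset>(<name>) in bytes, converted from the
 * 512-byte-sector GPT values, with the last partition shrunk by up to one
 * erase block to protect the backup GPT.
 */
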
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;
#ifdef MTD_BLK_VERBOSE
	ulong us = 1;
#endif

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

#ifdef MTD_BLK_VERBOSE
	us = get_ticks();
#endif
	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			ret = blkcnt;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		ret = mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			ret = blkcnt;
#endif
	}
#ifdef MTD_BLK_VERBOSE
	/* Convert 24 MHz timer ticks to microseconds */
	us = (get_ticks() - us) / 24UL;
	pr_err("mtd dread %s %lx %lx cost %ldus: %ldMB/s\n\n", mtd->name, start, blkcnt, us, (blkcnt / 2) / ((us + 999) / 1000));
#else
	pr_debug("mtd dread %s %lx %lx\n\n", mtd->name, start, blkcnt);
#endif

	return ret;
}

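#if 0
/*
 * Usage sketch (not compiled in): read 64 sectors starting at LBA 0x2000
 * through the generic block layer, which dispatches to mtd_dread() for
 * IF_TYPE_MTD devices. The device number and buffer are assumptions made
 * for the example.
 */
static ulong example_blk_read(void *buffer)
{
	struct blk_desc *desc;

	desc = blk_get_devnum_by_type(IF_TYPE_MTD, BLK_MTD_SPI_NAND);
	if (!desc)
		return 0;

	return blk_dread(desc, 0x2000, 64, buffer);
}
#endif
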
#if CONFIG_IS_ENABLED(MTD_WRITE)
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			/*
			 * Read-modify-write: expand the request to whole
			 * erase blocks, read the old contents, patch in
			 * the new data, then erase and write back.
			 */
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}

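/*
 * Worked example (illustrative only): writing 8 sectors (4 KiB) at LBA
 * 0xF8 with a 128 KiB erase block gives off = 0x1F000 and aligned =
 * 0x1F000, so off_aligned = 0 and rwsize_aligned rounds 0x20000 up to one
 * full erase block; the whole block is read, 4 KiB is patched in at offset
 * 0x1F000, and the block is erased and rewritten.
 */
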
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return blkcnt;
}
#endif

static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	/* Fill in the mtd device information */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		/* Skip the blocks reserved for the bad block table */
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Search backwards for the first good block; it marks
		 * the end LBA of the usable NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

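/*
 * Example (illustrative only): a device with a 128 KiB (0x20000) erase
 * block gets erasesize_shift = ffs(0x20000) - 1 = 17 and erasesize_mask =
 * 0x1ffff, so byte offset 0x45000 falls in erase block 0x45000 >> 17 = 2
 * at in-block offset 0x45000 & 0x1ffff = 0x5000.
 */
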
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};