xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision 5ddf13183b7dc6605e47e8415bb3c98eef2234fa)
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <image.h>
13 #include <malloc.h>
14 #include <nand.h>
15 #include <part.h>
16 #include <spi.h>
17 #include <dm/device-internal.h>
18 #include <linux/mtd/spi-nor.h>
19 #ifdef CONFIG_NAND
20 #include <linux/mtd/nand.h>
21 #endif
22 
23 #define MTD_PART_NAND_HEAD		"mtdparts="
24 #define MTD_ROOT_PART_NUM		"ubi.mtd="
25 #define MTD_ROOT_PART_NAME_UBIFS	"root=ubi0:rootfs"
26 #define MTD_ROOT_PART_NAME_SQUASHFS	"root=/dev/ubiblock0_0"
27 #define MTD_PART_INFO_MAX_SIZE		512
28 #define MTD_SINGLE_PART_INFO_MAX_SIZE	40
29 
30 #define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
31 #define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)
32 
33 static int *mtd_map_blk_table;
34 
/*
 * Build (or extend) the global logical->physical erase-block remap table
 * for the region [offset, offset + length) on the MTD device behind
 * @desc.  Each table entry maps a logical erase-block index to the index
 * of the next good (non-bad) physical block, so later reads/writes can
 * translate offsets without re-scanning for bad blocks.
 *
 * @desc:   block descriptor; only raw NAND / SPI-NAND devices are mapped
 * @offset: byte offset of the region to map
 * @length: byte length of the region to map
 *
 * Returns 0 on success (including when the region was already mapped),
 * -ENODEV for a missing/unsupported device, -ENOMEM on allocation
 * failure, -EINVAL when the start block is past the device end.
 */
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	/* Only parallel NAND and SPI-NAND need bad-block remapping */
	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		/* Total erase blocks on the device, rounding up */
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			/* One table covers the whole device; all entries start unknown */
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		/* NOTE(review): the u32 casts truncate offsets >= 4 GiB — confirm
		 * devices handled here stay below that limit. */
		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		/* First block already resolved: the region was mapped earlier */
		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		 /* should not across blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			/*
			 * No good physical blocks left inside the window:
			 * mark the remaining logical blocks unusable (SHIFT).
			 */
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					/* Logical block i lives in physical block j */
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					/* Window exhausted exactly here; push j past
					 * blk_cnt so later iterations take the SHIFT
					 * branch above. */
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}
99 
100 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
101 {
102 	bool mapped;
103 	loff_t offset = *off;
104 	size_t block_offset = offset & (mtd->erasesize - 1);
105 
106 	mapped = false;
107 	if (!mtd_map_blk_table ||
108 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
109 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
110 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
111 	    0xffffffff)
112 		return mapped;
113 
114 	mapped = true;
115 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
116 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
117 
118 	return mapped;
119 }
120 
121 void mtd_blk_map_partitions(struct blk_desc *desc)
122 {
123 	disk_partition_t info;
124 	int i, ret;
125 
126 	if (!desc)
127 		return;
128 
129 	if (desc->if_type != IF_TYPE_MTD)
130 		return;
131 
132 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
133 		ret = part_get_info(desc, i, &info);
134 		if (ret != 0)
135 			continue;
136 
137 		if (mtd_blk_map_table_init(desc,
138 					   info.start << 9,
139 					   info.size << 9)) {
140 			pr_debug("mtd block map table fail\n");
141 		}
142 	}
143 }
144 
145 void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
146 {
147 	struct mtd_info *mtd = NULL;
148 	int totalsize = 0;
149 
150 	if (desc->if_type != IF_TYPE_MTD)
151 		return;
152 
153 	if (desc->devnum == BLK_MTD_NAND) {
154 #if defined(CONFIG_NAND)
155 		mtd = dev_get_priv(desc->bdev->parent);
156 #endif
157 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
158 #if defined(CONFIG_MTD_SPI_NAND)
159 		mtd = desc->bdev->priv;
160 #endif
161 	}
162 
163 #ifdef CONFIG_SPL_FIT
164 	if (fit_get_totalsize(fit, &totalsize))
165 		debug("Can not find /totalsize node.\n");
166 #endif
167 	if (mtd && totalsize) {
168 		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
169 			debug("Map block table fail.\n");
170 	}
171 }
172 
/*
 * Read @*length bytes from logical @offset, erase block by erase block,
 * transparently skipping bad blocks.  When a remap-table entry exists
 * the mapped physical offset is used; otherwise bad blocks are detected
 * and skipped on the fly, with the data continuing in the next block.
 *
 * @mtd:    device to read from
 * @offset: logical byte offset
 * @length: in/out — requested size; reduced by the unread remainder on error
 * @actual: unused (kept for interface compatibility)
 * @lim:    unused (kept for interface compatibility)
 * @buffer: destination buffer
 *
 * Returns 0 on success (also when the read runs past the device end),
 * otherwise the mtd_read() error code; -EUCLEAN is tolerated.
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Running off the device end is silently treated as done */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap entry: check and skip bad blocks manually */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Read at most up to the end of the current erase block */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN = corrected bitflips; the data is still good */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
221 
/*
 * Erase-and-write @*length bytes at logical @offset, skipping bad erase
 * blocks.  Whenever the (mapped) offset reaches an erase-block boundary
 * the block is erased before being written.  Remap-table entries are
 * honoured; otherwise bad blocks are skipped on the fly.
 *
 * @mtd:    device to write to
 * @offset: logical byte offset; must be page (writesize) aligned
 * @length: in/out — requested size; reduced by the unwritten remainder on error
 * @actual: unused (kept for interface compatibility)
 * @lim:    unused (kept for interface compatibility)
 * @buffer: source data
 * @flags:  unused
 *
 * Returns 0 on success, -EINVAL for a non-page-aligned start, or the
 * mtd_erase()/mtd_write() error code.
 */
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		/* Running off the device end is silently treated as done */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap entry: check and skip bad blocks manually */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* At the start of an erase block: erase it before writing */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		/* Write at most up to the end of the current erase block */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			/* NOTE(review): offset was already advanced above, so this
			 * reports the offset just past the failed write */
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
305 
/*
 * Erase @length bytes starting at @offset; both must be erase-block
 * aligned.  Blocks with a remap-table entry are erased at their mapped
 * physical location; unmapped bad/reserved blocks are skipped without
 * consuming @length, so the scanned window extends one block past the
 * requested end for every skipped block.
 *
 * Returns 0 on success, -EINVAL on misalignment, or the mtd_erase()
 * error code.
 */
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap entry: skip bad/reserved blocks manually.
			 * len is intentionally NOT decremented here. */
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}
352 
353 char *mtd_part_parse(void)
354 {
355 	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
356 	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
357 	char mtd_root_part_info[40] = {0};
358 	struct blk_desc *dev_desc;
359 	disk_partition_t info;
360 	char *mtd_part_info_p;
361 	struct mtd_info *mtd;
362 	char *mtd_part_info;
363 	int ret;
364 	int p;
365 
366 	dev_desc = rockchip_get_bootdev();
367 	if (!dev_desc)
368 		return NULL;
369 
370 	mtd = (struct mtd_info *)dev_desc->bdev->priv;
371 	if (!mtd)
372 		return NULL;
373 
374 	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
375 	if (p > 0) {
376 		if (strstr(env_get("bootargs"), "rootfstype=squashfs"))
377 			snprintf(mtd_root_part_info, ARRAY_SIZE(mtd_root_part_info), "%s%d %s",
378 				 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME_SQUASHFS);
379 		else
380 			snprintf(mtd_root_part_info, ARRAY_SIZE(mtd_root_part_info), "%s%d %s",
381 				 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME_UBIFS);
382 		env_update("bootargs", mtd_root_part_info);
383 	}
384 
385 	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
386 	if (!mtd_part_info) {
387 		printf("%s: Fail to malloc!", __func__);
388 		return NULL;
389 	}
390 
391 	mtd_part_info_p = mtd_part_info;
392 	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
393 		 MTD_PART_NAND_HEAD,
394 		 dev_desc->product);
395 	data_len -= strlen(mtd_part_info_p);
396 	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);
397 
398 	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
399 		ret = part_get_info(dev_desc, p, &info);
400 		if (ret)
401 			break;
402 
403 		debug("name is %s, start addr is %x\n", info.name,
404 		      (int)(size_t)info.start);
405 
406 		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
407 			 (int)(size_t)info.size << 9,
408 			 (int)(size_t)info.start << 9,
409 			 info.name);
410 		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
411 			 "0x%x@0x%x(%s)",
412 			 (int)(size_t)info.size << 9,
413 			 (int)(size_t)info.start << 9,
414 			 info.name);
415 		strcat(mtd_part_info, ",");
416 		if (part_get_info(dev_desc, p + 1, &info) &&
417 		    (info.size + info.start + 33) == dev_desc->lba) {
418 			if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
419 				/* Nor is 64KB erase block(kernel) and gpt table just
420 				* resserve 33 sectors for the last partition. This
421 				* will erase the backup gpt table by user program,
422 				* so reserve one block.
423 				*/
424 				snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
425 					(int)(size_t)(info.size -
426 					(info.size - 1) %
427 					(0x10000 >> 9) - 1) << 9,
428 					(int)(size_t)info.start << 9,
429 					info.name);
430 				break;
431 			} else {
432 				/* Nand flash is erased by block and gpt table just
433 				* resserve 33 sectors for the last partition. This
434 				* will erase the backup gpt table by user program,
435 				* so reserve one block.
436 				*/
437 				snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
438 					(int)(size_t)(info.size -
439 					(info.size - 1) %
440 					(mtd->erasesize >> 9) - 1) << 9,
441 					(int)(size_t)info.start << 9,
442 					info.name);
443 				break;
444 			}
445 		}
446 		length = strlen(mtd_part_info_temp);
447 		data_len -= length;
448 		mtd_part_info_p = mtd_part_info_p + length + 1;
449 		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
450 	}
451 
452 	return mtd_part_info;
453 }
454 
455 ulong mtd_dread(struct udevice *udev, lbaint_t start,
456 		lbaint_t blkcnt, void *dst)
457 {
458 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
459 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
460 	loff_t off = (loff_t)(start * 512);
461 	size_t rwsize = blkcnt * 512;
462 #endif
463 	struct mtd_info *mtd;
464 	int ret = 0;
465 
466 	if (!desc)
467 		return ret;
468 
469 	mtd = desc->bdev->priv;
470 	if (!mtd)
471 		return 0;
472 
473 	if (blkcnt == 0)
474 		return 0;
475 
476 	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);
477 
478 	if (desc->devnum == BLK_MTD_NAND) {
479 		ret = mtd_map_read(mtd, off, &rwsize,
480 				   NULL, mtd->size,
481 				   (u_char *)(dst));
482 		if (!ret)
483 			return blkcnt;
484 		else
485 			return 0;
486 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
487 		ret = mtd_map_read(mtd, off, &rwsize,
488 				   NULL, mtd->size,
489 				   (u_char *)(dst));
490 		if (!ret)
491 			return blkcnt;
492 		else
493 			return 0;
494 	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
495 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
496 		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
497 		struct spi_slave *spi = nor->spi;
498 		size_t retlen_nor;
499 
500 		if (desc->op_flag == BLK_PRE_RW)
501 			spi->mode |= SPI_DMA_PREPARE;
502 		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
503 		if (desc->op_flag == BLK_PRE_RW)
504 			spi->mode |= SPI_DMA_PREPARE;
505 
506 		if (retlen_nor == rwsize)
507 			return blkcnt;
508 		else
509 #endif
510 			return 0;
511 	} else {
512 		return 0;
513 	}
514 }
515 
516 #if CONFIG_IS_ENABLED(MTD_WRITE)
517 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
518 		 lbaint_t blkcnt, const void *src)
519 {
520 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
521 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
522 	loff_t off = (loff_t)(start * 512);
523 	size_t rwsize = blkcnt * 512;
524 #endif
525 	struct mtd_info *mtd;
526 	int ret = 0;
527 
528 	if (!desc)
529 		return ret;
530 
531 	mtd = desc->bdev->priv;
532 	if (!mtd)
533 		return 0;
534 
535 	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
536 
537 	if (blkcnt == 0)
538 		return 0;
539 
540 	if (desc->devnum == BLK_MTD_NAND ||
541 	    desc->devnum == BLK_MTD_SPI_NAND ||
542 	    desc->devnum == BLK_MTD_SPI_NOR) {
543 		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
544 			ret = mtd_map_write(mtd, off, &rwsize,
545 					    NULL, mtd->size,
546 					    (u_char *)(src), 0);
547 			if (!ret)
548 				return blkcnt;
549 			else
550 				return 0;
551 		} else {
552 			lbaint_t off_aligned, alinged;
553 			size_t rwsize_aligned;
554 			u8 *p_buf;
555 
556 			alinged = off & mtd->erasesize_mask;
557 			off_aligned = off - alinged;
558 			rwsize_aligned = rwsize + alinged;
559 			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
560 				~(mtd->erasesize - 1);
561 
562 			p_buf = malloc(rwsize_aligned);
563 			if (!p_buf) {
564 				printf("%s: Fail to malloc!", __func__);
565 				return 0;
566 			}
567 
568 			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
569 					   NULL, mtd->size,
570 					   (u_char *)(p_buf));
571 			if (ret) {
572 				free(p_buf);
573 				return 0;
574 			}
575 
576 			memcpy(p_buf + alinged, src, rwsize);
577 
578 			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
579 					    NULL, mtd->size,
580 					    (u_char *)(p_buf), 0);
581 			free(p_buf);
582 			if (!ret)
583 				return blkcnt;
584 			else
585 				return 0;
586 		}
587 	} else {
588 		return 0;
589 	}
590 
591 	return 0;
592 }
593 
594 ulong mtd_derase(struct udevice *udev, lbaint_t start,
595 		 lbaint_t blkcnt)
596 {
597 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
598 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
599 	loff_t off = (loff_t)(start * 512);
600 	size_t len = blkcnt * 512;
601 #endif
602 	struct mtd_info *mtd;
603 	int ret = 0;
604 
605 	if (!desc)
606 		return ret;
607 
608 	mtd = desc->bdev->priv;
609 	if (!mtd)
610 		return 0;
611 
612 	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);
613 
614 	if (blkcnt == 0)
615 		return 0;
616 
617 	if (desc->devnum == BLK_MTD_NAND ||
618 	    desc->devnum == BLK_MTD_SPI_NAND ||
619 	    desc->devnum == BLK_MTD_SPI_NOR) {
620 		ret = mtd_map_erase(mtd, off, len);
621 		if (ret)
622 			return ret;
623 	} else {
624 		return 0;
625 	}
626 
627 	return 0;
628 }
629 #endif
630 
631 static int mtd_blk_probe(struct udevice *udev)
632 {
633 	struct mtd_info *mtd;
634 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
635 	int ret, i = 0;
636 
637 	mtd = dev_get_uclass_priv(udev->parent);
638 	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
639 #ifndef CONFIG_SPL_BUILD
640 		mtd = dev_get_priv(udev->parent);
641 #endif
642 	}
643 
644 	desc->bdev->priv = mtd;
645 	sprintf(desc->vendor, "0x%.4x", 0x2207);
646 	if (strncmp(mtd->name, "nand", 4) == 0)
647 		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
648 	else
649 		memcpy(desc->product, mtd->name, strlen(mtd->name));
650 	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
651 	if (mtd->type == MTD_NANDFLASH) {
652 #ifdef CONFIG_NAND
653 		if (desc->devnum == BLK_MTD_NAND)
654 			i = NAND_BBT_SCAN_MAXBLOCKS;
655 		else if (desc->devnum == BLK_MTD_SPI_NAND)
656 			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
657 #endif
658 
659 		/*
660 		 * Find the first useful block in the end,
661 		 * and it is the end lba of the nand storage.
662 		 */
663 		for (; i < (mtd->size / mtd->erasesize); i++) {
664 			ret =  mtd_block_isbad(mtd,
665 					       mtd->size - mtd->erasesize * (i + 1));
666 			if (!ret) {
667 				desc->lba = (mtd->size >> 9) -
668 					(mtd->erasesize >> 9) * i;
669 				break;
670 			}
671 		}
672 	} else {
673 		desc->lba = mtd->size >> 9;
674 	}
675 
676 	debug("MTD: desc->lba is %lx\n", desc->lba);
677 
678 	return 0;
679 }
680 
/* Block operation table for MTD-backed block devices; write/erase are
 * only compiled in when MTD_WRITE is enabled. */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};
688 
/* UCLASS_BLK driver registration exposing MTD devices as block devices */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
695