xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision 7e044b9aeceaa3c07ba4dd8939761bd87f4c8300)
/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <linux/log2.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME_UBIFS	"root=ubi0:rootfs"
#define MTD_ROOT_PART_NAME_SQUASHFS	"root=/dev/ubiblock0_0"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;
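/*
 * Build the logical-to-physical erase-block map for the range
 * [offset, offset + length) on the boot MTD device. Each logical
 * block is assigned the next good physical block in the range;
 * logical blocks left without a good physical block are marked
 * MTD_BLK_TABLE_BLOCK_SHIFT. The table covers the whole device,
 * is allocated once, and already-mapped ranges are not rebuilt.
 */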
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		/* the good-block scan must not run past blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}
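/*
 * Translate a logical offset to its remapped physical offset via
 * mtd_map_blk_table. Returns true and updates *off when a valid
 * mapping exists; returns false when the block is unmapped, in which
 * case the caller falls back to on-the-fly bad block skipping.
 */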
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}
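/*
 * Walk the partition table of an MTD block device and initialize the
 * erase-block map for every partition found.
 */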
void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}
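/*
 * Initialize the erase-block map for the region occupied by a FIT
 * image: from "sector" up to the FIT /totalsize value plus one spare
 * erase block.
 */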
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}
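/*
 * Read an arbitrary byte range from the MTD device. Offsets are first
 * translated through the erase-block map; for unmapped blocks, bad
 * blocks are skipped on the fly. Correctable bitflips (-EUCLEAN) are
 * not treated as errors.
 */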
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
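/*
 * Erase and write an arbitrary (page-aligned) byte range on the MTD
 * device. Offsets are translated through the erase-block map, unmapped
 * bad blocks are skipped, and each block is erased when the write
 * first touches its start.
 */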
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * mtd_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase the block the first time the write touches its start */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);
		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		offset += write_size;
		p_buffer += write_size;
		left_to_write -= write_size;
	}

	return 0;
}
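/*
 * Erase a block-aligned byte range on the MTD device, translating
 * offsets through the erase-block map and skipping bad or reserved
 * blocks that have no mapping.
 */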
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos = %llx, len = %llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}
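/*
 * Build a kernel "mtdparts=" command line string from the partition
 * table of the boot device. Outside SPL this also appends the
 * "ubi.mtd=" and root= arguments for the system partition to the
 * "bootargs" environment variable. Returns a heap-allocated string
 * the caller must free, or NULL on failure.
 */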
char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;
#ifndef CONFIG_SPL_BUILD
	char mtd_root_part_info[40] = {0};

	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		if (strstr(env_get("bootargs"), "rootfstype=squashfs"))
			snprintf(mtd_root_part_info, ARRAY_SIZE(mtd_root_part_info), "%s%d %s",
				 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME_SQUASHFS);
		else
			snprintf(mtd_root_part_info, ARRAY_SIZE(mtd_root_part_info), "%s%d %s",
				 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME_UBIFS);
		env_update("bootargs", mtd_root_part_info);
	}
#endif
	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* A partition with the grow tag in the parameter file is resized */
			if ((info.size + info.start + 64) >= dev_desc->lba) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* NOR is erased in 64KB blocks (by the
					 * kernel), and the GPT reserves just 33
					 * sectors for the last partition, so a
					 * user program writing that partition
					 * could erase the backup GPT table;
					 * reserve one erase block to avoid this.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* NAND flash is erased by block, and the
					 * GPT reserves just 33 sectors for the
					 * last partition, so a user program
					 * writing that partition could erase the
					 * backup GPT table; reserve one erase
					 * block to avoid this.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
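/*
 * Block-layer read op: read blkcnt 512-byte sectors starting at
 * "start". NAND and SPI-NAND go through the bad-block-aware
 * mtd_map_read(); SPI-NOR reads the raw MTD device directly.
 * Returns the number of sectors read, or 0 on error.
 */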
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

#if CONFIG_IS_ENABLED(MTD_WRITE)
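/*
 * Block-layer write op: write blkcnt 512-byte sectors starting at
 * "start". With BLK_MTD_CONT_WRITE the range is written directly;
 * otherwise it is widened to erase-block alignment and handled as a
 * read-modify-write so neighbouring data is preserved. Returns the
 * number of sectors written, or 0 on error.
 */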
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			/* Read back the aligned region, patch in the new data,
			 * then write the whole region out again.
			 */
			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}
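/*
 * Block-layer erase op: erase blkcnt 512-byte sectors starting at
 * "start". The range must be erase-block aligned. Returns blkcnt on
 * success, or the mtd_map_erase() error code on failure.
 */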
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return blkcnt;
}
#endif
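/*
 * Probe a UCLASS_BLK child of an MTD device: derive the erase/write
 * size shifts and masks, fill in the descriptor identity strings, and
 * compute the usable LBA count. For NAND the scan starts past the
 * blocks reserved for the bad block table and stops at the first good
 * block from the end, which marks the end of the usable area.
 */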
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	/* Fill in the mtd device information */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Scan backwards from the end of the device for the first
		 * good block; it marks the end LBA of the usable NAND
		 * storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};