xref: /OK3568_Linux_fs/u-boot/drivers/mtd/mtd_blk.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <linux/log2.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;
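
/*
 * A worked sketch of the table's behavior (values are illustrative, not
 * taken from any real device): for a region of 4 erase blocks starting at
 * block 0, where physical block 2 is bad, mtd_blk_map_table_init() fills
 * the table as
 *
 *	logical block:  0  1  2  3
 *	table entry:    0  1  3  MTD_BLK_TABLE_BLOCK_SHIFT
 *
 * i.e. logical block 2 is remapped past the bad block, and the last
 * logical block gets MTD_BLK_TABLE_BLOCK_SHIFT because the scan never
 * looks beyond the requested range. Entries still holding
 * MTD_BLK_TABLE_BLOCK_UNKNOWN belong to regions that were never scanned.
 */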

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		/* The mapping must not run past blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}
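
/*
 * Minimal usage sketch (hypothetical offsets and sizes, assuming desc
 * describes a NAND or SPI-NAND block device): pre-scan the first 8 MiB
 * so later block reads can skip bad blocks through the map table.
 *
 *	if (mtd_blk_map_table_init(desc, 0, 0x800000))
 *		printf("mtd map table init failed\n");
 */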

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}
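
/*
 * Translation example (illustrative numbers): with a 128 KiB erase block
 * (erasesize_shift = 17) and mtd_map_blk_table[1] = 3, an input offset of
 * 0x20400 (block 1, in-block offset 0x400) is rewritten to
 * (3 << 17) + 0x400 = 0x60400, i.e. the same offset inside the remapped
 * physical block.
 */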

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			break;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}

void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}

static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non-page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

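		/*
		 * Erase each mapped block when the write first reaches its
		 * start boundary, so the following mtd_write() lands in a
		 * freshly erased block.
		 */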
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non-block-aligned data, pos= %llx, len= %llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}

char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* A partition with the grow tag in the parameter file will be resized */
			if ((info.size + info.start + 64) >= dev_desc->lba) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* NOR uses a 64KB erase block (in the
					 * kernel) and the GPT reserves only 33
					 * sectors after the last partition, so
					 * a user program erasing that partition
					 * could destroy the backup GPT; hold
					 * back one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* NAND flash is erased per block and
					 * the GPT reserves only 33 sectors
					 * after the last partition, so a user
					 * program erasing that partition could
					 * destroy the backup GPT; hold back
					 * one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
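
/*
 * The string built above follows the kernel mtdparts= convention, e.g.
 * (hypothetical layout; sizes and offsets in bytes, size@offset):
 *
 *	mtdparts=rk-nand:0x100000@0x200000(uboot),0x2000000@0x400000(boot),0xc000000@0x2400000(rootfs)
 *
 * It is typically appended to the kernel command line so the kernel sees
 * the same partitioning as U-Boot.
 */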

ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

#if CONFIG_IS_ENABLED(MTD_WRITE)
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

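			/*
			 * Unaligned write: read back the surrounding erase
			 * blocks, merge the new data at the right offset,
			 * then write the whole aligned span back
			 * (read-modify-write).
			 */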
			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}

ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return blkcnt;
}
#endif

static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	/* Fill in the mtd device information */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Scan backward from the end of the device for the first
		 * good block; it marks the end LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};