xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision e15c98ee20b8dd02519a58e16b25e68be6e7b08a)
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <image.h>
13 #include <linux/log2.h>
14 #include <malloc.h>
15 #include <nand.h>
16 #include <part.h>
17 #include <spi.h>
18 #include <dm/device-internal.h>
19 #include <linux/mtd/spinand.h>
20 #include <linux/mtd/spi-nor.h>
21 #ifdef CONFIG_NAND
22 #include <linux/mtd/nand.h>
23 #endif
24 
25 // #define MTD_BLK_VERBOSE
26 
27 #define MTD_PART_NAND_HEAD		"mtdparts="
28 #define MTD_PART_INFO_MAX_SIZE		512
29 #define MTD_SINGLE_PART_INFO_MAX_SIZE	40
30 
31 #define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
32 #define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)
33 
34 #define FACTORY_UNKNOWN_LBA (0xffffffff - 34)
35 
36 static int *mtd_map_blk_table;
37 
38 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
39 static loff_t usbplug_dummy_partition_write_last_addr;
40 static loff_t usbplug_dummy_partition_write_seek;
41 static loff_t usbplug_dummy_partition_read_last_addr;
42 static loff_t usbplug_dummy_partition_read_seek;
43 #endif
44 
/*
 * Build (or extend) the logical->physical erase-block remap table for the
 * region [offset, offset + length) on the MTD device behind @desc.
 *
 * Each entry of mtd_map_blk_table maps one logical erase block to the next
 * good physical erase block inside the same region, so later accesses can
 * skip bad blocks without rescanning.  All entries start as
 * MTD_BLK_TABLE_BLOCK_UNKNOWN; a region is only mapped once (the first
 * already-initialized entry short-circuits).
 *
 * @desc:   block descriptor; must be a BLK_MTD_NAND or BLK_MTD_SPI_NAND
 *          device, otherwise -ENODEV.
 * @offset: byte offset of the region start (truncated to 32 bits below).
 * @length: region length in bytes.
 *
 * Return: 0 on success (or already mapped), -ENODEV without an MTD device,
 *         -ENOMEM if the table cannot be allocated, -EINVAL if @offset lies
 *         past the device end.
 */
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		/* Round the device size up to whole erase blocks */
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			/* Lazily allocate one table covering the whole device */
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		/*
		 * NOTE(review): offset is cast to u32 here, so regions beyond
		 * 4 GiB would be truncated — confirm devices stay below that.
		 */
		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		/* Clamp the region to the end of the device */
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		/* Region already mapped: nothing to do */
		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		 /* should not across blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			/*
			 * More logical blocks than good physical blocks left:
			 * mark the remainder so accesses fall back/skip.
			 */
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					/* Logical block i -> good physical block j */
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					/*
					 * NOTE(review): this extra bump pushes j past
					 * blk_cnt once the last block is consumed; the
					 * net effect matches j == blk_cnt — confirm it
					 * is intentional.
					 */
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}
109 
110 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
111 {
112 	bool mapped;
113 	loff_t offset = *off;
114 	size_t block_offset = offset & (mtd->erasesize - 1);
115 
116 	mapped = false;
117 	if (!mtd_map_blk_table ||
118 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
119 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
120 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
121 	    0xffffffff)
122 		return mapped;
123 
124 	mapped = true;
125 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
126 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
127 
128 	return mapped;
129 }
130 
131 void mtd_blk_map_partitions(struct blk_desc *desc)
132 {
133 	disk_partition_t info;
134 	int i, ret;
135 
136 	if (!desc)
137 		return;
138 
139 	if (desc->if_type != IF_TYPE_MTD)
140 		return;
141 
142 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
143 		ret = part_get_info(desc, i, &info);
144 		if (ret != 0)
145 			break;
146 
147 		if (mtd_blk_map_table_init(desc,
148 					   info.start << 9,
149 					   info.size << 9)) {
150 			pr_debug("mtd block map table fail\n");
151 		}
152 	}
153 }
154 
155 void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
156 {
157 	struct mtd_info *mtd = NULL;
158 	int totalsize = 0;
159 
160 	if (desc->if_type != IF_TYPE_MTD)
161 		return;
162 
163 	if (desc->devnum == BLK_MTD_NAND) {
164 #if defined(CONFIG_NAND)
165 		mtd = dev_get_priv(desc->bdev->parent);
166 #endif
167 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
168 #if defined(CONFIG_MTD_SPI_NAND)
169 		mtd = desc->bdev->priv;
170 #endif
171 	}
172 
173 #ifdef CONFIG_SPL_FIT
174 	if (fit_get_totalsize(fit, &totalsize))
175 		debug("Can not find /totalsize node.\n");
176 #endif
177 	if (mtd && totalsize) {
178 		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
179 			debug("Map block table fail.\n");
180 	}
181 }
182 
/*
 * Read *length bytes starting at logical @offset, block by block,
 * transparently skipping bad blocks: either via the remap table
 * (get_mtd_blk_map_address) or, when unmapped, by probing each block
 * with mtd_block_isbad() and stepping over bad ones.
 *
 * @actual and @lim are unused; on error *length is reduced by the amount
 * not read.  Returns 0 on success (also when @offset runs past the
 * device end) or the mtd_read() error (-EUCLEAN is treated as success).
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
	/*
	 * usbplug streams a partition in consecutive chunks: remember how far
	 * bad-block skipping pushed us ahead (seek) and re-apply it when the
	 * next chunk continues where the previous one ended; reset on a jump.
	 */
	if (usbplug_dummy_partition_read_last_addr != offset)
		usbplug_dummy_partition_read_seek = 0;
	usbplug_dummy_partition_read_last_addr = offset + left_to_read;
	offset += usbplug_dummy_partition_read_seek;
#endif

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Silently stop at the end of the device */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap entry: skip bad blocks manually */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08x in read\n",
				       (u32)(offset & ~(mtd->erasesize - 1)));
				offset += mtd->erasesize - block_offset;
#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
				usbplug_dummy_partition_read_seek += mtd->erasesize;
#endif
				continue;
			}
		}

		/* Read at most up to the end of the current erase block */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN means corrected bitflips: data is still valid */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %x failed %d\n",
			       (u32)offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
241 
/*
 * Erase-and-write *length bytes starting at logical @offset, skipping bad
 * blocks the same way mtd_map_read() does (remap table first, manual
 * mtd_block_isbad() probing otherwise).  Each erase block is erased when
 * the write reaches its start.
 *
 * @actual, @lim and @flags are unused; on error *length is reduced by the
 * amount not written.  @offset must be page-aligned.  Returns 0 on
 * success (also when @offset runs past the device end) or the erase/write
 * error code.
 */
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
	/*
	 * Carry the bad-block skip distance across consecutive usbplug
	 * chunks; reset it when the write address jumps (see mtd_map_read).
	 */
	if (usbplug_dummy_partition_write_last_addr != offset)
		usbplug_dummy_partition_write_seek = 0;
	usbplug_dummy_partition_write_last_addr = offset + left_to_write;
	offset += usbplug_dummy_partition_write_seek;
#endif

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		/* Silently stop at the end of the device */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08x in write\n",
				       (u32)(offset & ~(mtd->erasesize - 1)));
				offset += mtd->erasesize - block_offset;
#if CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
				usbplug_dummy_partition_write_seek += mtd->erasesize;
#endif
				continue;
			}
		}

		/* Erase each block the first time the write enters it */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		/* Write at most up to the end of the current erase block */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			/* offset has already advanced past the failed chunk */
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
335 
/*
 * Erase @length bytes starting at logical @offset, one erase block at a
 * time.  Offsets are remapped through the bad-block table when possible;
 * otherwise bad/reserved blocks are skipped without counting against
 * @length (the erased range extends past offset+length by one block per
 * skip).  Both @offset and @length must be erase-block aligned.
 *
 * Returns 0 on success, -EINVAL on misalignment, or the mtd_erase() error.
 */
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* Unmapped region: step over bad/reserved blocks */
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				/* note: len is deliberately not decremented here */
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}
382 
/*
 * Build a kernel-style mtdparts string for the boot device:
 *   "mtdparts=<product>:0x<size>@0x<start>(<name>),..."
 * with sizes/offsets in bytes (partition sectors are 512 bytes).
 *
 * The last partition is treated as a "grow" partition when it reaches the
 * end of the device (or carries the factory-unknown LBA marker): its size
 * is trimmed back to an erase-block boundary so user programs cannot wipe
 * the backup GPT stored in the final sectors.
 *
 * Returns a heap-allocated string the caller must free, or NULL on error.
 */
char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	/* In U-Boot proper always describe the actual boot device */
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* Write the "mtdparts=<product>:" prefix and advance the cursor */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/*
		 * Write the entry at the cursor, and keep a copy in _temp so
		 * its length can be measured after the lookahead below
		 * (which overwrites `info`).
		 */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		/* Separator after the entry; the +1 cursor bump below skips it */
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* Partition with grow tag in parameter will be resized */
			if ((info.size + info.start + 64) >= dev_desc->lba ||
			    (info.size + info.start - 1) == FACTORY_UNKNOWN_LBA) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* Nor is 64KB erase block(kernel) and gpt table just
					 * resserve 33 sectors for the last partition. This
					 * will erase the backup gpt table by user program,
					 * so reserve one block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* Nand flash is erased by block and gpt table just
					 * resserve 33 sectors for the last partition. This
					 * will erase the backup gpt table by user program,
					 * so reserve one block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		/*
		 * NOTE(review): data_len is not reduced for the ',' separator
		 * added above — possible gradual over-count of remaining
		 * space; confirm against MTD_PART_INFO_MAX_SIZE headroom.
		 */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
483 
/*
 * Block-uclass read op: read @blkcnt 512-byte sectors at @start into @dst.
 *
 * Routing depends on the device number:
 *  - BLK_MTD_NAND:     bad-block-aware mtd_map_read()
 *  - BLK_MTD_SPI_NAND: raw mtd_read() for DMA-prepare or continuous-read
 *                      capable chips, otherwise mtd_map_read()
 *  - BLK_MTD_SPI_NOR:  raw mtd_read() (NOR has no bad blocks)
 *
 * Returns the number of sectors read, or 0 on error.
 */
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;
#ifdef MTD_BLK_VERBOSE
	ulong us = 1;
#endif

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

#ifdef MTD_BLK_VERBOSE
	us = get_ticks();
#endif
	if (desc->devnum == BLK_MTD_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			ret = blkcnt;
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		struct spinand_device *spinand = mtd_to_spinand(mtd);
		struct spi_slave *spi = spinand->slave;
		size_t retlen_nand;

		if (desc->op_flag == BLK_PRE_RW) {
			/* Pre-read path: flag the SPI layer to prepare DMA */
			spi->mode |= SPI_DMA_PREPARE;
			ret = mtd_read(mtd, off, rwsize,
				       &retlen_nand, (u_char *)(dst));
			spi->mode &= ~SPI_DMA_PREPARE;
			if (retlen_nand == rwsize)
				ret = blkcnt;
		} else {
			if (spinand->support_cont_read)
				ret = mtd_read(mtd, off, rwsize,
					       &retlen_nand,
					       (u_char *)(dst));
			else
				ret = mtd_map_read(mtd, off, &rwsize,
						   NULL, mtd->size,
						   (u_char *)(dst));
			if (!ret)
				ret = blkcnt;
		}
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		ret = mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		/*
		 * NOTE(review): retlen_nor may be unset if mtd_read() fails
		 * early — confirm the driver always writes it.
		 */
		if (retlen_nor == rwsize)
			ret = blkcnt;
#endif
	}
#ifdef MTD_BLK_VERBOSE
	/* Convert tick delta to microseconds and report throughput */
	us = (get_ticks() - us) / (gd->arch.timer_rate_hz / 1000000);
	pr_err("mtd dread %s %lx %lx cost %ldus: %ldMB/s\n\n", mtd->name, start, blkcnt, us, (blkcnt / 2) / ((us + 999) / 1000));
#else
	pr_debug("mtd dread %s %lx %lx\n\n", mtd->name, start, blkcnt);
#endif

	return ret;
}
568 
569 #if CONFIG_IS_ENABLED(MTD_WRITE)
/*
 * Block-uclass write op: write @blkcnt 512-byte sectors from @src at @start.
 *
 * With BLK_MTD_CONT_WRITE set (sequential image download) the data is
 * written directly via mtd_map_write().  Otherwise a read-modify-write is
 * performed on whole erase blocks so an unaligned sector write does not
 * clobber neighbouring data.  Writes touching the GPT area clear the
 * continuous-write flag first.
 *
 * Returns the number of sectors written, or 0 on error.
 */
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	/*
	 * GPT lives in LBA 1 and the last 33 sectors: force the safe
	 * read-modify-write path for writes that touch it.
	 */
	if (desc->op_flag & BLK_MTD_CONT_WRITE &&
	    (start == 1 || ((desc->lba - start) <= 33))) {
		printf("Write in GPT area, lba=%ld cnt=%ld\n", start, blkcnt);
		desc->op_flag &= ~BLK_MTD_CONT_WRITE;
	}

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag & BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			/* Read-modify-write on erase-block-aligned bounds */
			lbaint_t off_aligned, alinged;
			size_t rwsize_aligned;
			u8 *p_buf;

			/* alinged = offset of the write inside its erase block */
			alinged = off & mtd->erasesize_mask;
			off_aligned = off - alinged;
			rwsize_aligned = rwsize + alinged;
			/* Round the span up to whole erase blocks */
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Fail to malloc!", __func__);
				return 0;
			}

			/* Read back the blocks we are about to rewrite */
			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			/* Merge in the new data, then write everything back */
			memcpy(p_buf + alinged, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}
652 
/*
 * Block-uclass erase op: erase @blkcnt 512-byte sectors starting at @start.
 * The byte length is rounded up to whole erase blocks before calling
 * mtd_map_erase().
 *
 * Returns @blkcnt on success, 0 for unsupported devices or empty requests.
 * NOTE(review): on failure the (negative) mtd_map_erase() error is
 * returned through the unsigned return type — confirm callers expect
 * that rather than 0.
 */
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);
	/* mtd_map_erase() requires erase-block-aligned length */
	len = round_up(len, mtd->erasesize);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return blkcnt;
}
689 #endif
690 
/*
 * Probe the MTD block device: fill in erase/write size shifts and masks,
 * identify the device (vendor/product/revision strings), and compute the
 * usable capacity in 512-byte sectors.
 *
 * For NAND-type devices the last blocks may be bad or reserved for the
 * BBT, so the capacity is shrunk until the last block is a good one.
 *
 * Always returns 0.
 */
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
		/* Raw NAND in U-Boot proper keeps its mtd_info in dev priv */
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	/* Fill mtd devices information */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	desc->bdev->priv = mtd;
	/* 0x2207 is the Rockchip vendor id */
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
		/* Skip the blocks reserved for the bad block table */
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
#endif
#ifdef CONFIG_MTD_SPI_NAND
		if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the first useful block in the end,
		 * and it is the end lba of the nand storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret =  mtd_block_isbad(mtd,
					       mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				desc->rawlba = desc->lba;
				break;
			}
		}
	} else {
		/* NOR: the whole device is usable */
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}
757 
/* Block-uclass operations; write/erase only when MTD_WRITE is enabled */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};
765 
/* Driver-model registration for the MTD block driver */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
772