xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision ce9d2743bad9006120c201dbdf6bea58757a3d39)
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <image.h>
13 #include <malloc.h>
14 #include <nand.h>
15 #include <part.h>
16 #include <spi.h>
17 #include <dm/device-internal.h>
18 #include <linux/mtd/spi-nor.h>
19 #ifdef CONFIG_NAND
20 #include <linux/mtd/nand.h>
21 #endif
22 
23 #define MTD_PART_NAND_HEAD		"mtdparts="
24 #define MTD_ROOT_PART_NUM		"ubi.mtd="
25 #define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
26 #define MTD_PART_INFO_MAX_SIZE		512
27 #define MTD_SINGLE_PART_INFO_MAX_SIZE	40
28 
29 #define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
30 #define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)
31 
32 static int *mtd_map_blk_table;
33 
34 int mtd_blk_map_table_init(struct blk_desc *desc,
35 			   loff_t offset,
36 			   size_t length)
37 {
38 	u32 blk_total, blk_begin, blk_cnt;
39 	struct mtd_info *mtd = NULL;
40 	int i, j;
41 
42 	if (!desc)
43 		return -ENODEV;
44 
45 	switch (desc->devnum) {
46 	case BLK_MTD_NAND:
47 	case BLK_MTD_SPI_NAND:
48 		mtd = desc->bdev->priv;
49 		break;
50 	default:
51 		break;
52 	}
53 
54 	if (!mtd) {
55 		return -ENODEV;
56 	} else {
57 		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
58 		if (!mtd_map_blk_table) {
59 			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
60 			if (!mtd_map_blk_table)
61 				return -ENOMEM;
62 			for (i = 0; i < blk_total; i++)
63 				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
64 		}
65 
66 		blk_begin = (u32)offset >> mtd->erasesize_shift;
67 		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \
68 			mtd->erasesize - 1) >> mtd->erasesize_shift);
69 		if (blk_begin >= blk_total) {
70 			pr_err("map table blk begin[%d] overflow\n", blk_begin);
71 			return -EINVAL;
72 		}
73 		if ((blk_begin + blk_cnt) > blk_total)
74 			blk_cnt = blk_total - blk_begin;
75 
76 		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
77 			return 0;
78 
79 		j = 0;
80 		 /* should not across blk_cnt */
81 		for (i = 0; i < blk_cnt; i++) {
82 			if (j >= blk_cnt)
83 				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
84 			for (; j < blk_cnt; j++) {
85 				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
86 					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
87 					j++;
88 					if (j == blk_cnt)
89 						j++;
90 					break;
91 				}
92 			}
93 		}
94 
95 		return 0;
96 	}
97 }
98 
99 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
100 {
101 	bool mapped;
102 	loff_t offset = *off;
103 	size_t block_offset = offset & (mtd->erasesize - 1);
104 
105 	mapped = false;
106 	if (!mtd_map_blk_table ||
107 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
108 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
109 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
110 	    0xffffffff)
111 		return mapped;
112 
113 	mapped = true;
114 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
115 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
116 
117 	return mapped;
118 }
119 
120 void mtd_blk_map_partitions(struct blk_desc *desc)
121 {
122 	disk_partition_t info;
123 	int i, ret;
124 
125 	if (!desc)
126 		return;
127 
128 	if (desc->if_type != IF_TYPE_MTD)
129 		return;
130 
131 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
132 		ret = part_get_info(desc, i, &info);
133 		if (ret != 0)
134 			continue;
135 
136 		if (mtd_blk_map_table_init(desc,
137 					   info.start << 9,
138 					   info.size << 9)) {
139 			pr_debug("mtd block map table fail\n");
140 		}
141 	}
142 }
143 
144 void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
145 {
146 	struct mtd_info *mtd = NULL;
147 	int totalsize = 0;
148 
149 	if (desc->if_type != IF_TYPE_MTD)
150 		return;
151 
152 	if (desc->devnum == BLK_MTD_NAND) {
153 #if defined(CONFIG_NAND)
154 		mtd = dev_get_priv(desc->bdev->parent);
155 #endif
156 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
157 #if defined(CONFIG_MTD_SPI_NAND)
158 		mtd = desc->bdev->priv;
159 #endif
160 	}
161 
162 #ifdef CONFIG_SPL_FIT
163 	if (fit_get_totalsize(fit, &totalsize))
164 		debug("Can not find /totalsize node.\n");
165 #endif
166 	if (mtd && totalsize) {
167 		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
168 			debug("Map block table fail.\n");
169 	}
170 }
171 
/*
 * Read *length bytes from @offset, redirecting each erase block through
 * the remap table when one exists, otherwise skipping bad blocks the
 * classic way. On error, *length is reduced by the amount NOT read.
 *
 * NOTE(review): 'actual' and 'lim' are accepted for nand_util-style
 * signature compatibility but are unused here.
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Reads past the device end are silently truncated */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No map entry: fall back to skip-bad-block scanning */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Read at most up to the end of the current erase block */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN means corrected bitflips; the data is valid */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
220 
221 static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
222 					size_t *length, size_t *actual,
223 					loff_t lim, u_char *buffer, int flags)
224 {
225 	int rval = 0, blocksize;
226 	size_t left_to_write = *length;
227 	u_char *p_buffer = buffer;
228 	struct erase_info ei;
229 
230 	blocksize = mtd->erasesize;
231 
232 	/*
233 	 * nand_write() handles unaligned, partial page writes.
234 	 *
235 	 * We allow length to be unaligned, for convenience in
236 	 * using the $filesize variable.
237 	 *
238 	 * However, starting at an unaligned offset makes the
239 	 * semantics of bad block skipping ambiguous (really,
240 	 * you should only start a block skipping access at a
241 	 * partition boundary).  So don't try to handle that.
242 	 */
243 	if ((offset & (mtd->writesize - 1)) != 0) {
244 		printf("Attempt to write non page-aligned data\n");
245 		*length = 0;
246 		return -EINVAL;
247 	}
248 
249 	while (left_to_write > 0) {
250 		size_t block_offset = offset & (mtd->erasesize - 1);
251 		size_t write_size, truncated_write_size;
252 		loff_t mapped_offset;
253 
254 		if (offset >= mtd->size)
255 			return 0;
256 
257 		mapped_offset = offset;
258 		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
259 			if (mtd_block_isbad(mtd, mapped_offset &
260 					    ~(mtd->erasesize - 1))) {
261 				printf("Skipping bad block 0x%08llx\n",
262 				       offset & ~(mtd->erasesize - 1));
263 				offset += mtd->erasesize - block_offset;
264 				continue;
265 			}
266 		}
267 
268 		if (!(mapped_offset & mtd->erasesize_mask)) {
269 			memset(&ei, 0, sizeof(struct erase_info));
270 			ei.addr = mapped_offset;
271 			ei.len  = mtd->erasesize;
272 			rval = mtd_erase(mtd, &ei);
273 			if (rval) {
274 				pr_info("error %d while erasing %llx\n", rval,
275 					mapped_offset);
276 				return rval;
277 			}
278 		}
279 
280 		if (left_to_write < (blocksize - block_offset))
281 			write_size = left_to_write;
282 		else
283 			write_size = blocksize - block_offset;
284 
285 		truncated_write_size = write_size;
286 		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
287 				 (size_t *)(&truncated_write_size), p_buffer);
288 
289 		offset += write_size;
290 		p_buffer += write_size;
291 
292 		if (rval != 0) {
293 			printf("NAND write to offset %llx failed %d\n",
294 			       offset, rval);
295 			*length -= left_to_write;
296 			return rval;
297 		}
298 
299 		left_to_write -= write_size;
300 	}
301 
302 	return 0;
303 }
304 
305 static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
306 					size_t length)
307 {
308 	struct erase_info ei;
309 	loff_t pos, len;
310 	int ret;
311 
312 	pos = offset;
313 	len = length;
314 
315 	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
316 		pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n",
317 		       pos, len);
318 
319 		return -EINVAL;
320 	}
321 
322 	while (len) {
323 		if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
324 			pr_debug("attempt to erase a bad/reserved block @%llx\n",
325 				 pos);
326 			pos += mtd->erasesize;
327 			continue;
328 		}
329 
330 		memset(&ei, 0, sizeof(struct erase_info));
331 		ei.addr = pos;
332 		ei.len  = mtd->erasesize;
333 		ret = mtd_erase(mtd, &ei);
334 		if (ret) {
335 			pr_err("map_erase error %d while erasing %llx\n", ret,
336 			       pos);
337 			return ret;
338 		}
339 
340 		pos += mtd->erasesize;
341 		len -= mtd->erasesize;
342 	}
343 
344 	return 0;
345 }
346 
/*
 * Build a kernel "mtdparts=<product>:size@off(name),..." command-line
 * string from the GPT on the boot device, and append ubi.mtd=/root=
 * bootargs when a "system" partition exists.
 *
 * Returns a heap-allocated string (caller owns it) or NULL on failure.
 *
 * NOTE(review): data_len is u32 and is decremented by produced string
 * lengths without an underflow check; with many/long partition names it
 * could wrap and defeat the snprintf bounds — verify against
 * MTD_PART_INFO_MAX_SIZE sizing.
 */
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	/* "system" partition number -> ubi.mtd index (0-based) for bootargs */
	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s", MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* "mtdparts=<product>:" header; mtd_part_info_p tracks the tail */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/* Write "size@start(name)" (bytes = sectors << 9) at the tail;
		 * the temp copy is only used to measure the produced length.
		 */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		/* Separator lands right after the entry just written */
		strcat(mtd_part_info, ",");
		/* Lookahead: if p is the LAST partition, rewrite its entry
		 * with a shrunken size so user programs cannot clobber the
		 * backup GPT at the end of the device, then stop.
		 */
		if (part_get_info(dev_desc, p + 1, &info)) {
			if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
				/* Nor is 64KB erase block(kernel) and gpt table just
				* resserve 33 sectors for the last partition. This
				* will erase the backup gpt table by user program,
				* so reserve one block.
				*/
				snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
					(int)(size_t)(info.size -
					(info.size - 1) %
					(0x10000 >> 9) - 1) << 9,
					(int)(size_t)info.start << 9,
					info.name);
				break;
			} else {
				/* Nand flash is erased by block and gpt table just
				* resserve 33 sectors for the last partition. This
				* will erase the backup gpt table by user program,
				* so reserve one block.
				*/
				snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
					(int)(size_t)(info.size -
					(info.size - 1) %
					(mtd->erasesize >> 9) - 1) << 9,
					(int)(size_t)info.start << 9,
					info.name);
				break;
			}
		}
		/* Advance past the entry plus the "," appended by strcat */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
442 
443 ulong mtd_dread(struct udevice *udev, lbaint_t start,
444 		lbaint_t blkcnt, void *dst)
445 {
446 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
447 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
448 	loff_t off = (loff_t)(start * 512);
449 	size_t rwsize = blkcnt * 512;
450 #endif
451 	struct mtd_info *mtd;
452 	int ret = 0;
453 
454 	if (!desc)
455 		return ret;
456 
457 	mtd = desc->bdev->priv;
458 	if (!mtd)
459 		return 0;
460 
461 	if (blkcnt == 0)
462 		return 0;
463 
464 	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);
465 
466 	if (desc->devnum == BLK_MTD_NAND) {
467 		ret = mtd_map_read(mtd, off, &rwsize,
468 				   NULL, mtd->size,
469 				   (u_char *)(dst));
470 		if (!ret)
471 			return blkcnt;
472 		else
473 			return 0;
474 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
475 		ret = mtd_map_read(mtd, off, &rwsize,
476 				   NULL, mtd->size,
477 				   (u_char *)(dst));
478 		if (!ret)
479 			return blkcnt;
480 		else
481 			return 0;
482 	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
483 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
484 		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
485 		struct spi_slave *spi = nor->spi;
486 		size_t retlen_nor;
487 
488 		if (desc->op_flag == BLK_PRE_RW)
489 			spi->mode |= SPI_DMA_PREPARE;
490 		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
491 		if (desc->op_flag == BLK_PRE_RW)
492 			spi->mode |= SPI_DMA_PREPARE;
493 
494 		if (retlen_nor == rwsize)
495 			return blkcnt;
496 		else
497 #endif
498 			return 0;
499 	} else {
500 		return 0;
501 	}
502 }
503 
504 #if CONFIG_IS_ENABLED(MTD_WRITE)
505 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
506 		 lbaint_t blkcnt, const void *src)
507 {
508 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
509 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
510 	loff_t off = (loff_t)(start * 512);
511 	size_t rwsize = blkcnt * 512;
512 #endif
513 	struct mtd_info *mtd;
514 	int ret = 0;
515 
516 	if (!desc)
517 		return ret;
518 
519 	mtd = desc->bdev->priv;
520 	if (!mtd)
521 		return 0;
522 
523 	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
524 
525 	if (blkcnt == 0)
526 		return 0;
527 
528 	if (desc->devnum == BLK_MTD_NAND ||
529 	    desc->devnum == BLK_MTD_SPI_NAND ||
530 	    desc->devnum == BLK_MTD_SPI_NOR) {
531 		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
532 			ret = mtd_map_write(mtd, off, &rwsize,
533 					    NULL, mtd->size,
534 					    (u_char *)(src), 0);
535 			if (!ret)
536 				return blkcnt;
537 			else
538 				return 0;
539 		} else {
540 			lbaint_t off_aligned, alinged;
541 			size_t rwsize_aligned;
542 			u8 *p_buf;
543 
544 			alinged = off & mtd->erasesize_mask;
545 			off_aligned = off - alinged;
546 			rwsize_aligned = rwsize + alinged;
547 			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
548 				~(mtd->erasesize - 1);
549 
550 			p_buf = malloc(rwsize_aligned);
551 			if (!p_buf) {
552 				printf("%s: Fail to malloc!", __func__);
553 				return 0;
554 			}
555 
556 			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
557 					   NULL, mtd->size,
558 					   (u_char *)(p_buf));
559 			if (ret) {
560 				free(p_buf);
561 				return 0;
562 			}
563 
564 			memcpy(p_buf + alinged, src, rwsize);
565 
566 			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
567 					    NULL, mtd->size,
568 					    (u_char *)(p_buf), 0);
569 			free(p_buf);
570 			if (!ret)
571 				return blkcnt;
572 			else
573 				return 0;
574 		}
575 	} else {
576 		return 0;
577 	}
578 
579 	return 0;
580 }
581 
582 ulong mtd_derase(struct udevice *udev, lbaint_t start,
583 		 lbaint_t blkcnt)
584 {
585 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
586 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
587 	loff_t off = (loff_t)(start * 512);
588 	size_t len = blkcnt * 512;
589 #endif
590 	struct mtd_info *mtd;
591 	int ret = 0;
592 
593 	if (!desc)
594 		return ret;
595 
596 	mtd = desc->bdev->priv;
597 	if (!mtd)
598 		return 0;
599 
600 	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);
601 
602 	if (blkcnt == 0)
603 		return 0;
604 
605 	if (desc->devnum == BLK_MTD_NAND ||
606 	    desc->devnum == BLK_MTD_SPI_NAND ||
607 	    desc->devnum == BLK_MTD_SPI_NOR) {
608 		ret = mtd_map_erase(mtd, off, len);
609 		if (ret)
610 			return ret;
611 	} else {
612 		return 0;
613 	}
614 
615 	return 0;
616 }
617 #endif
618 
619 static int mtd_blk_probe(struct udevice *udev)
620 {
621 	struct mtd_info *mtd;
622 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
623 	int ret, i = 0;
624 
625 	mtd = dev_get_uclass_priv(udev->parent);
626 	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
627 #ifndef CONFIG_SPL_BUILD
628 		mtd = dev_get_priv(udev->parent);
629 #endif
630 	}
631 
632 	desc->bdev->priv = mtd;
633 	sprintf(desc->vendor, "0x%.4x", 0x2207);
634 	memcpy(desc->product, mtd->name, strlen(mtd->name));
635 	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
636 	if (mtd->type == MTD_NANDFLASH) {
637 #ifdef CONFIG_NAND
638 		if (desc->devnum == BLK_MTD_NAND)
639 			i = NAND_BBT_SCAN_MAXBLOCKS;
640 		else if (desc->devnum == BLK_MTD_SPI_NAND)
641 			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
642 #endif
643 
644 		/*
645 		 * Find the first useful block in the end,
646 		 * and it is the end lba of the nand storage.
647 		 */
648 		for (; i < (mtd->size / mtd->erasesize); i++) {
649 			ret =  mtd_block_isbad(mtd,
650 					       mtd->size - mtd->erasesize * (i + 1));
651 			if (!ret) {
652 				desc->lba = (mtd->size >> 9) -
653 					(mtd->erasesize >> 9) * i;
654 				break;
655 			}
656 		}
657 	} else {
658 		desc->lba = mtd->size >> 9;
659 	}
660 
661 	debug("MTD: desc->lba is %lx\n", desc->lba);
662 
663 	return 0;
664 }
665 
/* Block operations for MTD-backed block devices; write/erase are only
 * provided when MTD_WRITE is enabled for this build stage.
 */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};
673 
/* Register the MTD block driver with the driver model (UCLASS_BLK). */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
680