/* xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision 3b2dd5de375e8ce0e0c9a9ffb2c5965a7582c4ea) */
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <nand.h>
14 #include <part.h>
15 #include <spi.h>
16 #include <dm/device-internal.h>
17 #include <linux/mtd/spi-nor.h>
18 #ifdef CONFIG_NAND
19 #include <linux/mtd/nand.h>
20 #endif
21 
22 #define MTD_PART_NAND_HEAD		"mtdparts="
23 #define MTD_ROOT_PART_NUM		"ubi.mtd="
24 #define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
25 #define MTD_PART_INFO_MAX_SIZE		512
26 #define MTD_SINGLE_PART_INFO_MAX_SIZE	40
27 
28 #define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
29 #define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)
30 
31 static int *mtd_map_blk_table;
32 
/*
 * Build (or extend) the logical-to-physical erase-block remap table for the
 * region [offset, offset + length) of the MTD device behind @desc.
 *
 * The table is the file-global, lazily allocated mtd_map_blk_table with one
 * entry per erase block of the whole device.  Entry i holds the physical
 * block a read/write of logical block i should use (bad blocks are skipped
 * forward), MTD_BLK_TABLE_BLOCK_SHIFT when the region ran out of good
 * blocks, or MTD_BLK_TABLE_BLOCK_UNKNOWN when the block was never scanned.
 *
 * Returns 0 on success (including "region already scanned"), -ENODEV for a
 * missing/non-NAND device, -ENOMEM or -EINVAL on allocation/range errors.
 */
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	/* Only NAND-style devices need bad-block remapping */
	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		/* Erase blocks in the whole device, rounded up */
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			/* Mark every block as not-yet-scanned */
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		/* NOTE(review): the (u32) casts truncate offsets >= 4 GiB —
		 * fine for today's parts, verify before reuse on larger media.
		 */
		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		/* First entry no longer UNKNOWN => region already scanned */
		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		 /* should not across blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			/* All good blocks consumed: remaining entries SHIFT */
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					/* Logical block i -> next good block j */
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					/*
					 * j just hit the region end; bump it
					 * past blk_cnt so the SHIFT branch
					 * above fires for the remaining i's.
					 */
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}
97 
98 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
99 {
100 	bool mapped;
101 	loff_t offset = *off;
102 	size_t block_offset = offset & (mtd->erasesize - 1);
103 
104 	mapped = false;
105 	if (!mtd_map_blk_table ||
106 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
107 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
108 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
109 	    0xffffffff)
110 		return mapped;
111 
112 	mapped = true;
113 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
114 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
115 
116 	return mapped;
117 }
118 
119 void mtd_blk_map_partitions(struct blk_desc *desc)
120 {
121 	disk_partition_t info;
122 	int i, ret;
123 
124 	if (!desc)
125 		return;
126 
127 	if (desc->if_type != IF_TYPE_MTD)
128 		return;
129 
130 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
131 		ret = part_get_info(desc, i, &info);
132 		if (ret != 0)
133 			continue;
134 
135 		if (mtd_blk_map_table_init(desc,
136 					   info.start << 9,
137 					   info.size << 9)) {
138 			pr_debug("mtd block map table fail\n");
139 		}
140 	}
141 }
142 
/*
 * Read *length bytes from @offset into @buffer, consulting the bad-block
 * remap table first and falling back to on-the-fly bad-block skipping when
 * the region is unmapped.  On error, *length is reduced to the number of
 * bytes actually consumed before the failure.
 *
 * Returns 0 on success (reads past mtd->size silently stop), or the
 * mtd_read() error code (other than -EUCLEAN, which is tolerated).
 * @actual and @lim are accepted for signature compatibility but unused.
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Ran off the end of the device: treat as done */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* Unmapped region: skip bad blocks manually */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Read at most up to the end of the current erase block */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN means corrected bitflips: data is still good */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
191 
/*
 * Erase-then-write *length bytes from @buffer to @offset, using the
 * bad-block remap table when available and skipping bad blocks otherwise.
 * Each erase block is erased when the write first touches its start.
 * On error, *length is reduced to the bytes consumed before the failure.
 *
 * Returns 0 on success, or the mtd_erase()/mtd_write() error code.
 * @actual, @lim and @flags are accepted for signature compatibility but
 * unused.
 */
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		/* Ran off the end of the device: treat as done */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* Unmapped region: skip bad blocks manually */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase each block the first time the write enters it */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		/* Write at most up to the end of the current erase block */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			/*
			 * NOTE(review): offset has already been advanced by
			 * write_size here, so this message reports the
			 * address *after* the failed chunk — consider fixing.
			 */
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
275 
276 static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
277 					size_t length)
278 {
279 	struct erase_info ei;
280 	loff_t pos, len;
281 	int ret;
282 
283 	pos = offset;
284 	len = length;
285 
286 	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
287 		pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n",
288 		       pos, len);
289 
290 		return -EINVAL;
291 	}
292 
293 	while (len) {
294 		if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
295 			pr_debug("attempt to erase a bad/reserved block @%llx\n",
296 				 pos);
297 			pos += mtd->erasesize;
298 			continue;
299 		}
300 
301 		memset(&ei, 0, sizeof(struct erase_info));
302 		ei.addr = pos;
303 		ei.len  = mtd->erasesize;
304 		ret = mtd_erase(mtd, &ei);
305 		if (ret) {
306 			pr_err("map_erase error %d while erasing %llx\n", ret,
307 			       pos);
308 			return ret;
309 		}
310 
311 		pos += mtd->erasesize;
312 		len -= mtd->erasesize;
313 	}
314 
315 	return 0;
316 }
317 
/*
 * Build an "mtdparts=<product>:size@off(name),..." command-line string from
 * the boot device's partition table, and push a "ubi.mtd=<n> root=..."
 * fragment into the bootargs environment when a system partition exists.
 *
 * Returns a heap-allocated string the caller owns (never freed here), or
 * NULL when there is no boot device / MTD or allocation fails.
 */
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	/* Point the rootfs at the UBI volume on the system partition.
	 * ubi.mtd is 0-based while part numbers are 1-based, hence p - 1.
	 */
	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s", MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* "mtdparts=<flash name>:" prefix */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/* Sizes/starts are sectors; << 9 converts to bytes */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* Nand flash is erased by block and gpt table just
			 * resserve 33 sectors for the last partition. This
			 * will erase the backup gpt table by user program,
			 * so reserve one block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		/*
		 * NOTE(review): the pointer advances by length + 1 (for the
		 * comma) but data_len is only reduced by length, so the
		 * remaining-space bookkeeping drifts by one byte per
		 * partition — verify against MTD_PART_INFO_MAX_SIZE headroom.
		 */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
398 
399 ulong mtd_dread(struct udevice *udev, lbaint_t start,
400 		lbaint_t blkcnt, void *dst)
401 {
402 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
403 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
404 	loff_t off = (loff_t)(start * 512);
405 	size_t rwsize = blkcnt * 512;
406 #endif
407 	struct mtd_info *mtd;
408 	int ret = 0;
409 
410 	if (!desc)
411 		return ret;
412 
413 	mtd = desc->bdev->priv;
414 	if (!mtd)
415 		return 0;
416 
417 	if (blkcnt == 0)
418 		return 0;
419 
420 	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);
421 
422 	if (desc->devnum == BLK_MTD_NAND) {
423 		ret = mtd_map_read(mtd, off, &rwsize,
424 				   NULL, mtd->size,
425 				   (u_char *)(dst));
426 		if (!ret)
427 			return blkcnt;
428 		else
429 			return 0;
430 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
431 		ret = mtd_map_read(mtd, off, &rwsize,
432 				   NULL, mtd->size,
433 				   (u_char *)(dst));
434 		if (!ret)
435 			return blkcnt;
436 		else
437 			return 0;
438 	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
439 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
440 		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
441 		struct spi_slave *spi = nor->spi;
442 		size_t retlen_nor;
443 
444 		if (desc->op_flag == BLK_PRE_RW)
445 			spi->mode |= SPI_DMA_PREPARE;
446 		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
447 		if (desc->op_flag == BLK_PRE_RW)
448 			spi->mode |= SPI_DMA_PREPARE;
449 
450 		if (retlen_nor == rwsize)
451 			return blkcnt;
452 		else
453 #endif
454 			return 0;
455 	} else {
456 		return 0;
457 	}
458 }
459 
460 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
461 		 lbaint_t blkcnt, const void *src)
462 {
463 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
464 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
465 	loff_t off = (loff_t)(start * 512);
466 	size_t rwsize = blkcnt * 512;
467 #endif
468 	struct mtd_info *mtd;
469 	int ret = 0;
470 
471 	if (!desc)
472 		return ret;
473 
474 	mtd = desc->bdev->priv;
475 	if (!mtd)
476 		return 0;
477 
478 	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
479 
480 	if (blkcnt == 0)
481 		return 0;
482 
483 	if (desc->devnum == BLK_MTD_NAND ||
484 	    desc->devnum == BLK_MTD_SPI_NAND ||
485 	    desc->devnum == BLK_MTD_SPI_NOR) {
486 		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
487 			ret = mtd_map_write(mtd, off, &rwsize,
488 					    NULL, mtd->size,
489 					    (u_char *)(src), 0);
490 			if (!ret)
491 				return blkcnt;
492 			else
493 				return 0;
494 		} else {
495 			lbaint_t off_aligned, alinged;
496 			size_t rwsize_aligned;
497 			u8 *p_buf;
498 
499 			alinged = off & mtd->erasesize_mask;
500 			off_aligned = off - alinged;
501 			rwsize_aligned = rwsize + alinged;
502 			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
503 				~(mtd->erasesize - 1);
504 
505 			p_buf = malloc(rwsize_aligned);
506 			if (!p_buf) {
507 				printf("%s: Fail to malloc!", __func__);
508 				return 0;
509 			}
510 
511 			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
512 					   NULL, mtd->size,
513 					   (u_char *)(p_buf));
514 			if (ret) {
515 				free(p_buf);
516 				return 0;
517 			}
518 
519 			memcpy(p_buf + alinged, src, rwsize);
520 
521 			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
522 					    NULL, mtd->size,
523 					    (u_char *)(p_buf), 0);
524 			free(p_buf);
525 			if (!ret)
526 				return blkcnt;
527 			else
528 				return 0;
529 		}
530 	} else {
531 		return 0;
532 	}
533 
534 	return 0;
535 }
536 
537 ulong mtd_derase(struct udevice *udev, lbaint_t start,
538 		 lbaint_t blkcnt)
539 {
540 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
541 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
542 	loff_t off = (loff_t)(start * 512);
543 	size_t len = blkcnt * 512;
544 #endif
545 	struct mtd_info *mtd;
546 	int ret = 0;
547 
548 	if (!desc)
549 		return ret;
550 
551 	mtd = desc->bdev->priv;
552 	if (!mtd)
553 		return 0;
554 
555 	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);
556 
557 	if (blkcnt == 0)
558 		return 0;
559 
560 	if (desc->devnum == BLK_MTD_NAND ||
561 	    desc->devnum == BLK_MTD_SPI_NAND) {
562 		ret = mtd_map_erase(mtd, off, len);
563 		if (ret)
564 			return ret;
565 	} else {
566 		return 0;
567 	}
568 
569 	return 0;
570 }
571 
572 static int mtd_blk_probe(struct udevice *udev)
573 {
574 	struct mtd_info *mtd;
575 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
576 	int ret, i = 0;
577 
578 	mtd = dev_get_uclass_priv(udev->parent);
579 	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
580 #ifndef CONFIG_SPL_BUILD
581 		mtd = dev_get_priv(udev->parent);
582 #endif
583 	}
584 
585 	desc->bdev->priv = mtd;
586 	sprintf(desc->vendor, "0x%.4x", 0x2207);
587 	memcpy(desc->product, mtd->name, strlen(mtd->name));
588 	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
589 	if (mtd->type == MTD_NANDFLASH) {
590 #ifdef CONFIG_NAND
591 		if (desc->devnum == BLK_MTD_NAND)
592 			i = NAND_BBT_SCAN_MAXBLOCKS;
593 		else if (desc->devnum == BLK_MTD_SPI_NAND)
594 			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
595 #endif
596 
597 		/*
598 		 * Find the first useful block in the end,
599 		 * and it is the end lba of the nand storage.
600 		 */
601 		for (; i < (mtd->size / mtd->erasesize); i++) {
602 			ret =  mtd_block_isbad(mtd,
603 					       mtd->size - mtd->erasesize * (i + 1));
604 			if (!ret) {
605 				desc->lba = (mtd->size >> 9) -
606 					(mtd->erasesize >> 9) * i;
607 				break;
608 			}
609 		}
610 	} else {
611 		desc->lba = mtd->size >> 9;
612 	}
613 
614 	debug("MTD: desc->lba is %lx\n", desc->lba);
615 
616 	return 0;
617 }
618 
/* Block operations exposed to the blk uclass; write/erase are dropped in
 * SPL to keep the image small.
 */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

/* Driver binding: "mtd_blk" block devices under UCLASS_BLK */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
633