xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision effae6d71544d6cab5ae01aa7160bb709b3a3e6e)
/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;

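/*
 * Build a logical-to-physical erase-block map for the region
 * [offset, offset + length). Each entry holds the physical block that
 * backs a logical block, skipping bad blocks within the region.
 * Entries start as MTD_BLK_TABLE_BLOCK_UNKNOWN (never scanned);
 * MTD_BLK_TABLE_BLOCK_SHIFT marks logical blocks pushed past the end
 * of the region by bad-block skipping.
 */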
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
		   mtd->erasesize - 1) >> mtd->erasesize_shift);
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%u] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* Region already scanned: its first block has a map entry */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The mapping must not cross the end of the region (blk_cnt) */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

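/*
 * Translate a logical offset through mtd_map_blk_table into the
 * physical offset of a known-good erase block. Returns true (and
 * rewrites *off) only when the block has a valid map entry; callers
 * fall back to on-the-fly bad-block skipping otherwise.
 */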
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

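/*
 * Walk the partition table and build the block map for every
 * partition. Partition start/size are in 512-byte sectors, hence
 * the << 9 conversion to byte addresses.
 */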
void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9))
			pr_debug("mtd block map table init failed\n");
	}
}

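/*
 * Build the block map for the region holding a FIT image at @sector.
 * One erase block of extra margin is added to the mapped length.
 */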
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}

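/*
 * Read that transparently skips bad blocks, at most one erase block
 * per iteration. Offsets are first translated through the block map
 * when available; otherwise bad blocks are skipped on the fly.
 * Correctable bitflips (-EUCLEAN) are not treated as errors.
 */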
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}

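/*
 * Bad-block-skipping write. Each erase block is erased when the write
 * reaches its boundary, then programmed. The start offset must be
 * page-aligned; the length may be unaligned.
 */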
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase the block when the write reaches its boundary */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

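/*
 * Erase a block-aligned region, silently skipping bad and reserved
 * blocks rather than failing on them.
 */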
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos=%llx, len=%llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
			pr_debug("attempt to erase a bad/reserved block @%llx\n",
				 pos);
			pos += mtd->erasesize;
			continue;
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = pos;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}

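/*
 * Build a kernel "mtdparts=" command line fragment from the partition
 * table, e.g. (names and numbers illustrative only):
 *
 *   mtdparts=<product>:0x2000@0x4000(uboot),0x100000@0x8000(rootfs)
 *
 * with sizes and offsets in bytes. When a PART_SYSTEM partition
 * exists, "ubi.mtd=<n> root=ubi0:rootfs" is also appended to the
 * bootargs environment variable. The returned buffer is allocated
 * with calloc(); the caller owns it.
 */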
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s", MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/*
			 * NAND flash is erased by block, but the GPT only
			 * reserves 33 sectors for the backup table at the
			 * end of the device. A user program writing to the
			 * end of the last partition could therefore erase
			 * the backup GPT, so shrink that partition by one
			 * erase block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

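/*
 * blk_ops read hook: read @blkcnt 512-byte blocks from @start into
 * @dst. Returns the number of blocks read, or 0 on failure.
 */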
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		/* Clear the DMA-prepare hint once the transfer is done */
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

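/*
 * blk_ops write hook. For BLK_MTD_CONT_WRITE the data is written
 * straight through; otherwise the request is widened to erase-block
 * alignment and handled as a read-modify-write so neighbouring data
 * in the same erase blocks is preserved.
 */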
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			/* Widen the request to whole erase blocks */
			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			/* Read-modify-write: merge @src into the aligned buffer */
			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	}

	return 0;
}

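/*
 * blk_ops erase hook: erase @blkcnt 512-byte blocks starting at
 * @start. Start and length must map to whole erase blocks. Returns a
 * negative errno on erase failure, otherwise 0.
 */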
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return 0;
}

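/*
 * Probe hook: bind the parent MTD device to this block device and
 * fill in the descriptor. For NAND the usable LBA range is trimmed so
 * it ends at the last good block below the area reserved for the bad
 * block table.
 */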
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the last good block from the end of the device;
		 * it determines the usable end LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
	.write	= mtd_dwrite,
#ifndef CONFIG_SPL_BUILD
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};