xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision d3acdc96e2fd0ed34beb32b26ba57131a1e04ea3)
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <nand.h>
14 #include <part.h>
15 #include <spi.h>
16 #include <dm/device-internal.h>
17 #include <linux/mtd/spi-nor.h>
18 
19 #define MTD_PART_NAND_HEAD		"mtdparts="
20 #define MTD_ROOT_PART_NUM		"ubi.mtd="
21 #define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
22 #define MTD_PART_INFO_MAX_SIZE		512
23 #define MTD_SINGLE_PART_INFO_MAX_SIZE	40
24 
25 #define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
26 #define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)
27 
28 static int *mtd_map_blk_table;
29 
30 int mtd_blk_map_table_init(struct blk_desc *desc,
31 			   loff_t offset,
32 			   size_t length)
33 {
34 	u32 blk_total, blk_begin, blk_cnt;
35 	struct mtd_info *mtd = NULL;
36 	int i, j;
37 
38 	if (!desc)
39 		return -ENODEV;
40 
41 	if (desc->devnum == BLK_MTD_NAND) {
42 #if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
43 		mtd = dev_get_priv(desc->bdev->parent);
44 #endif
45 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
46 #if defined(CONFIG_MTD_SPI_NAND) && !defined(CONFIG_SPL_BUILD)
47 		mtd = desc->bdev->priv;
48 #endif
49 	}
50 
51 	if (!mtd) {
52 		return -ENODEV;
53 	} else {
54 		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
55 		if (!mtd_map_blk_table) {
56 			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
57 			for (i = 0; i < blk_total; i++)
58 				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
59 		}
60 
61 		blk_begin = (u32)offset >> mtd->erasesize_shift;
62 		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length) >> mtd->erasesize_shift);
63 		if (blk_begin >= blk_total) {
64 			pr_err("map table blk begin[%d] overflow\n", blk_begin);
65 			return -EINVAL;
66 		}
67 		if ((blk_begin + blk_cnt) > blk_total)
68 			blk_cnt = blk_total - blk_begin;
69 
70 		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
71 			return 0;
72 
73 		j = 0;
74 		 /* should not across blk_cnt */
75 		for (i = 0; i < blk_cnt; i++) {
76 			if (j >= blk_cnt)
77 				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
78 			for (; j < blk_cnt; j++) {
79 				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
80 					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
81 					j++;
82 					if (j == blk_cnt)
83 						j++;
84 					break;
85 				}
86 			}
87 		}
88 
89 		return 0;
90 	}
91 }
92 
93 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
94 {
95 	bool mapped;
96 	loff_t offset = *off;
97 	size_t block_offset = offset & (mtd->erasesize - 1);
98 
99 	mapped = false;
100 	if (!mtd_map_blk_table ||
101 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
102 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
103 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
104 	    0xffffffff)
105 		return mapped;
106 
107 	mapped = true;
108 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
109 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
110 
111 	return mapped;
112 }
113 
114 void mtd_blk_map_partitions(struct blk_desc *desc)
115 {
116 	disk_partition_t info;
117 	int i, ret;
118 
119 	if (!desc)
120 		return;
121 
122 	if (desc->if_type != IF_TYPE_MTD)
123 		return;
124 
125 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
126 		ret = part_get_info(desc, i, &info);
127 		if (ret != 0)
128 			continue;
129 
130 		if (mtd_blk_map_table_init(desc,
131 					   info.start << 9,
132 					   info.size << 9)) {
133 			pr_debug("mtd block map table fail\n");
134 		}
135 	}
136 }
137 
/*
 * Read *length bytes starting at @offset, skipping bad erase blocks.
 *
 * Offsets are first translated through the bad-block map table; only when
 * no mapping exists does the routine probe mtd_block_isbad() itself and
 * advance past bad blocks. Reads are issued at most one erase block at a
 * time so each block can be validated independently.
 *
 * On failure *length is reduced by the amount NOT read. @actual and @lim
 * are accepted for signature compatibility but unused here.
 * Returns 0 on success (including when @offset runs past the device end),
 * or the mtd_read() error code (-EUCLEAN bitflip "errors" are tolerated).
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Ran off the end of the device: treated as success. */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* Unmapped region: do the bad-block check here. */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				/* Jump to the start of the next block. */
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Clamp this chunk to the end of the current erase block. */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN = corrected bitflips; data is still valid. */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
186 
187 static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
188 					size_t *length, size_t *actual,
189 					loff_t lim, u_char *buffer, int flags)
190 {
191 	int rval = 0, blocksize;
192 	size_t left_to_write = *length;
193 	u_char *p_buffer = buffer;
194 	struct erase_info ei;
195 
196 	blocksize = mtd->erasesize;
197 
198 	/*
199 	 * nand_write() handles unaligned, partial page writes.
200 	 *
201 	 * We allow length to be unaligned, for convenience in
202 	 * using the $filesize variable.
203 	 *
204 	 * However, starting at an unaligned offset makes the
205 	 * semantics of bad block skipping ambiguous (really,
206 	 * you should only start a block skipping access at a
207 	 * partition boundary).  So don't try to handle that.
208 	 */
209 	if ((offset & (mtd->writesize - 1)) != 0) {
210 		printf("Attempt to write non page-aligned data\n");
211 		*length = 0;
212 		return -EINVAL;
213 	}
214 
215 	while (left_to_write > 0) {
216 		size_t block_offset = offset & (mtd->erasesize - 1);
217 		size_t write_size, truncated_write_size;
218 		loff_t mapped_offset;
219 
220 		if (offset >= mtd->size)
221 			return 0;
222 
223 		mapped_offset = offset;
224 		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
225 			if (mtd_block_isbad(mtd, mapped_offset &
226 					    ~(mtd->erasesize - 1))) {
227 				printf("Skipping bad block 0x%08llx\n",
228 				       offset & ~(mtd->erasesize - 1));
229 				offset += mtd->erasesize - block_offset;
230 				continue;
231 			}
232 		}
233 
234 		if (!(mapped_offset & mtd->erasesize_mask)) {
235 			memset(&ei, 0, sizeof(struct erase_info));
236 			ei.addr = mapped_offset;
237 			ei.len  = mtd->erasesize;
238 			rval = mtd_erase(mtd, &ei);
239 			if (rval) {
240 				pr_info("error %d while erasing %llx\n", rval,
241 					mapped_offset);
242 				return rval;
243 			}
244 		}
245 
246 		if (left_to_write < (blocksize - block_offset))
247 			write_size = left_to_write;
248 		else
249 			write_size = blocksize - block_offset;
250 
251 		truncated_write_size = write_size;
252 		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
253 				 (size_t *)(&truncated_write_size), p_buffer);
254 
255 		offset += write_size;
256 		p_buffer += write_size;
257 
258 		if (rval != 0) {
259 			printf("NAND write to offset %llx failed %d\n",
260 			       offset, rval);
261 			*length -= left_to_write;
262 			return rval;
263 		}
264 
265 		left_to_write -= write_size;
266 	}
267 
268 	return 0;
269 }
270 
/*
 * Build an "mtdparts=<product>:size@off(name),..." string from the boot
 * device's partition table, for passing to the kernel command line. Also
 * appends "ubi.mtd=<n> root=ubi0:rootfs" to bootargs when a system
 * partition is found.
 *
 * Returns a heap-allocated string the caller owns (must free), or NULL if
 * there is no boot device / MTD info or allocation fails.
 *
 * NOTE(review): the data_len bookkeeping below is approximate — the ','
 * added by strcat() is not always counted against data_len, and strcat()
 * itself is unbounded; presumably MTD_PART_INFO_MAX_SIZE is sized with
 * enough slack for MAX_SEARCH_PARTITIONS entries — verify against callers.
 */
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	/* Point the UBI rootfs at the system partition (0-based mtd index). */
	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s", MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* "mtdparts=<product>:" prefix, then append one entry per partition. */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/* Sizes/offsets are sectors; << 9 converts to bytes. */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		/* Duplicate into temp only to measure this entry's length. */
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* Nand flash is erased by block and gpt table just
			 * reserve 33 sectors for the last partition. This
			 * will erase the backup gpt table by user program,
			 * so reserve one block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		/* +1 skips past the ',' separator just appended. */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
351 
352 ulong mtd_dread(struct udevice *udev, lbaint_t start,
353 		lbaint_t blkcnt, void *dst)
354 {
355 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
356 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
357 	loff_t off = (loff_t)(start * 512);
358 	size_t rwsize = blkcnt * 512;
359 #endif
360 	struct mtd_info *mtd;
361 	int ret = 0;
362 
363 	if (!desc)
364 		return ret;
365 
366 	mtd = desc->bdev->priv;
367 	if (!mtd)
368 		return 0;
369 
370 	if (blkcnt == 0)
371 		return 0;
372 
373 	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);
374 
375 	if (desc->devnum == BLK_MTD_NAND) {
376 #if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
377 		mtd = dev_get_priv(udev->parent);
378 		if (!mtd)
379 			return 0;
380 
381 		ret = nand_read_skip_bad(mtd, off, &rwsize,
382 					 NULL, mtd->size,
383 					 (u_char *)(dst));
384 #else
385 		ret = mtd_map_read(mtd, off, &rwsize,
386 				   NULL, mtd->size,
387 				   (u_char *)(dst));
388 #endif
389 		if (!ret)
390 			return blkcnt;
391 		else
392 			return 0;
393 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
394 		ret = mtd_map_read(mtd, off, &rwsize,
395 				   NULL, mtd->size,
396 				   (u_char *)(dst));
397 		if (!ret)
398 			return blkcnt;
399 		else
400 			return 0;
401 	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
402 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
403 		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
404 		struct spi_slave *spi = nor->spi;
405 		size_t retlen_nor;
406 
407 		if (desc->op_flag == BLK_PRE_RW)
408 			spi->mode |= SPI_DMA_PREPARE;
409 		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
410 		if (desc->op_flag == BLK_PRE_RW)
411 			spi->mode |= SPI_DMA_PREPARE;
412 
413 		if (retlen_nor == rwsize)
414 			return blkcnt;
415 		else
416 #endif
417 			return 0;
418 	} else {
419 		return 0;
420 	}
421 }
422 
423 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
424 		 lbaint_t blkcnt, const void *src)
425 {
426 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
427 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
428 	loff_t off = (loff_t)(start * 512);
429 	size_t rwsize = blkcnt * 512;
430 #endif
431 	struct mtd_info *mtd;
432 	int ret = 0;
433 
434 	if (!desc)
435 		return ret;
436 
437 	mtd = desc->bdev->priv;
438 	if (!mtd)
439 		return 0;
440 
441 	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
442 
443 	if (blkcnt == 0)
444 		return 0;
445 
446 	if (desc->devnum == BLK_MTD_NAND ||
447 	    desc->devnum == BLK_MTD_SPI_NAND ||
448 	    desc->devnum == BLK_MTD_SPI_NOR) {
449 		if (desc->op_flag == BLK_MTD_NBA_RW) {
450 			lbaint_t off_aligned, alinged;
451 			size_t rwsize_aligned;
452 			u8 *p_buf;
453 
454 			alinged = off & mtd->erasesize_mask;
455 			off_aligned = off - alinged;
456 			rwsize_aligned = rwsize + alinged;
457 			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
458 				~(mtd->erasesize - 1);
459 
460 			p_buf = malloc(rwsize_aligned);
461 			if (!p_buf) {
462 				printf("%s: Fail to malloc!", __func__);
463 				return 0;
464 			}
465 
466 			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
467 					   NULL, mtd->size,
468 					   (u_char *)(p_buf));
469 			if (ret) {
470 				free(p_buf);
471 				return 0;
472 			}
473 
474 			memcpy(p_buf + alinged, src, rwsize);
475 
476 			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
477 					    NULL, mtd->size,
478 					    (u_char *)(p_buf), 0);
479 			free(p_buf);
480 			if (!ret)
481 				return blkcnt;
482 			else
483 				return 0;
484 		} else {
485 			ret = mtd_map_write(mtd, off, &rwsize,
486 					    NULL, mtd->size,
487 					    (u_char *)(src), 0);
488 			if (!ret)
489 				return blkcnt;
490 			else
491 				return 0;
492 		}
493 	} else {
494 		return 0;
495 	}
496 
497 	return 0;
498 }
499 
500 ulong mtd_derase(struct udevice *udev, lbaint_t start,
501 		 lbaint_t blkcnt)
502 {
503 	/* Not implemented */
504 	return 0;
505 }
506 
507 static int mtd_blk_probe(struct udevice *udev)
508 {
509 	struct mtd_info *mtd = dev_get_uclass_priv(udev->parent);
510 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
511 	int ret, i;
512 
513 	desc->bdev->priv = mtd;
514 	sprintf(desc->vendor, "0x%.4x", 0x2207);
515 	memcpy(desc->product, mtd->name, strlen(mtd->name));
516 	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
517 	if (mtd->type == MTD_NANDFLASH) {
518 		if (desc->devnum == BLK_MTD_NAND)
519 			mtd = dev_get_priv(udev->parent);
520 		/*
521 		 * Find the first useful block in the end,
522 		 * and it is the end lba of the nand storage.
523 		 */
524 		for (i = 0; i < (mtd->size / mtd->erasesize); i++) {
525 			ret =  mtd_block_isbad(mtd,
526 					       mtd->size - mtd->erasesize * (i + 1));
527 			if (!ret) {
528 				desc->lba = (mtd->size >> 9) -
529 					(mtd->erasesize >> 9) * i;
530 				break;
531 			}
532 		}
533 	} else {
534 		desc->lba = mtd->size >> 9;
535 	}
536 
537 	debug("MTD: desc->lba is %lx\n", desc->lba);
538 
539 	return 0;
540 }
541 
/* Block operations for MTD devices; write/erase are read-only-stripped in SPL. */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

/* UCLASS_BLK driver binding MTD devices into the generic block layer. */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
556