xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision 74eb6027432600de60ed1c8bf892f1f8243c2c8a)
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <nand.h>
14 #include <part.h>
15 #include <dm/device-internal.h>
16 
17 #define MTD_PART_NAND_HEAD		"mtdparts="
18 #define MTD_ROOT_PART_NUM		"ubi.mtd="
19 #define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
20 #define MTD_PART_INFO_MAX_SIZE		512
21 #define MTD_SINGLE_PART_INFO_MAX_SIZE	40
22 
23 #define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
24 #define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)
25 
26 static int *mtd_map_blk_table;
27 
28 int mtd_blk_map_table_init(struct blk_desc *desc,
29 			   loff_t offset,
30 			   size_t length)
31 {
32 	u32 blk_total, blk_begin, blk_cnt;
33 	struct mtd_info *mtd = NULL;
34 	int i, j;
35 
36 	if (!desc)
37 		return -ENODEV;
38 
39 	if (desc->devnum == BLK_MTD_NAND) {
40 #if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
41 		mtd = dev_get_priv(desc->bdev->parent);
42 #endif
43 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
44 #if defined(CONFIG_MTD_SPI_NAND) && !defined(CONFIG_SPL_BUILD)
45 		mtd = desc->bdev->priv;
46 #endif
47 	}
48 
49 	if (!mtd) {
50 		return -ENODEV;
51 	} else {
52 		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
53 		if (!mtd_map_blk_table) {
54 			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
55 			for (i = 0; i < blk_total; i++)
56 				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
57 		}
58 
59 		blk_begin = (u32)offset >> mtd->erasesize_shift;
60 		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length) >> mtd->erasesize_shift);
61 		if (blk_begin >= blk_total) {
62 			pr_err("map table blk begin[%d] overflow\n", blk_begin);
63 			return -EINVAL;
64 		}
65 		if ((blk_begin + blk_cnt) > blk_total)
66 			blk_cnt = blk_total - blk_begin;
67 
68 		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
69 			return 0;
70 
71 		j = 0;
72 		 /* should not across blk_cnt */
73 		for (i = 0; i < blk_cnt; i++) {
74 			if (j >= blk_cnt)
75 				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
76 			for (; j < blk_cnt; j++) {
77 				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
78 					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
79 					j++;
80 					if (j == blk_cnt)
81 						j++;
82 					break;
83 				}
84 			}
85 		}
86 
87 		return 0;
88 	}
89 }
90 
91 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
92 {
93 	bool mapped;
94 	loff_t offset = *off;
95 	size_t block_offset = offset & (mtd->erasesize - 1);
96 
97 	mapped = false;
98 	if (!mtd_map_blk_table ||
99 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
100 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
101 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
102 	    0xffffffff)
103 		return mapped;
104 
105 	mapped = true;
106 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
107 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
108 
109 	return mapped;
110 }
111 
112 void mtd_blk_map_partitions(struct blk_desc *desc)
113 {
114 	disk_partition_t info;
115 	int i, ret;
116 
117 	if (!desc)
118 		return;
119 
120 	if (desc->if_type != IF_TYPE_MTD)
121 		return;
122 
123 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
124 		ret = part_get_info(desc, i, &info);
125 		if (ret != 0)
126 			continue;
127 
128 		if (mtd_blk_map_table_init(desc,
129 					   info.start << 9,
130 					   info.size << 9)) {
131 			pr_debug("mtd block map table fail\n");
132 		}
133 	}
134 }
135 
136 static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
137 				       size_t *length, size_t *actual,
138 				       loff_t lim, u_char *buffer)
139 {
140 	size_t left_to_read = *length;
141 	u_char *p_buffer = buffer;
142 	int rval;
143 
144 	while (left_to_read > 0) {
145 		size_t block_offset = offset & (mtd->erasesize - 1);
146 		size_t read_length;
147 		loff_t mapped_offset;
148 
149 		if (offset >= mtd->size)
150 			return 0;
151 
152 		mapped_offset = offset;
153 		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
154 			if (mtd_block_isbad(mtd, mapped_offset &
155 					    ~(mtd->erasesize - 1))) {
156 				printf("Skipping bad block 0x%08llx\n",
157 				       offset & ~(mtd->erasesize - 1));
158 				offset += mtd->erasesize - block_offset;
159 				continue;
160 			}
161 		}
162 
163 		if (left_to_read < (mtd->erasesize - block_offset))
164 			read_length = left_to_read;
165 		else
166 			read_length = mtd->erasesize - block_offset;
167 
168 		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
169 				p_buffer);
170 		if (rval && rval != -EUCLEAN) {
171 			printf("NAND read from offset %llx failed %d\n",
172 			       offset, rval);
173 			*length -= left_to_read;
174 			return rval;
175 		}
176 
177 		left_to_read -= read_length;
178 		offset       += read_length;
179 		p_buffer     += read_length;
180 	}
181 
182 	return 0;
183 }
184 
/*
 * Write *length bytes from @buffer to @offset, erasing each erase block
 * before its first page is written. Offsets are routed through the
 * remap table when available; otherwise bad blocks are skipped in
 * place. On failure *length is reduced to the bytes actually written.
 *
 * @offset must be page-aligned; @actual, @lim and @flags are unused.
 */
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		/* Silently stop at the device end, reporting success. */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		/* Unmapped offsets must dodge bad blocks themselves. */
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase once per block, on the block boundary. */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		/* Write at most up to the end of the current block. */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			/*
			 * NOTE(review): offset has already been advanced
			 * here, so the printed offset is one chunk past
			 * the failing write.
			 */
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
268 
/*
 * Build a kernel command-line partition description of the boot device:
 * "mtdparts=<product>:0x<size>@0x<offset>(<name>),..." and, when a
 * system partition exists, append "ubi.mtd=<n> root=ubi0:rootfs" to
 * bootargs.
 *
 * Returns a heap-allocated string the caller owns, or NULL on error.
 */
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	/* ubi.mtd numbering is 0-based while partitions are 1-based. */
	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s", MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* mtd_part_info_p is a cursor into the mtd_part_info buffer. */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/* Emit the entry at the cursor, and a scratch copy to measure. */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		/* Separator lands right after the entry just written. */
		strcat(mtd_part_info, ",");
		/*
		 * Lookup of p + 1 failing means p is the last partition;
		 * rewrite its entry in place (dropping the trailing comma)
		 * with a shrunken size.
		 * NOTE(review): this relies on the failed part_get_info()
		 * call leaving 'info' (partition p's data) untouched —
		 * confirm against the part driver.
		 */
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* Nand flash is erased by block and gpt table just
			 * reserve 33 sectors for the last partition. This
			 * will erase the backup gpt table by user program,
			 * so reserve one block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		/*
		 * NOTE(review): data_len is not reduced for the ','
		 * separator, so the remaining-space accounting drifts by
		 * one byte per partition — verify the total stays within
		 * MTD_PART_INFO_MAX_SIZE for worst-case tables.
		 */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
349 
350 ulong mtd_dread(struct udevice *udev, lbaint_t start,
351 		lbaint_t blkcnt, void *dst)
352 {
353 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
354 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
355 	loff_t off = (loff_t)(start * 512);
356 	size_t rwsize = blkcnt * 512;
357 #endif
358 	struct mtd_info *mtd;
359 	int ret = 0;
360 
361 	if (!desc)
362 		return ret;
363 
364 	mtd = desc->bdev->priv;
365 	if (!mtd)
366 		return 0;
367 
368 	if (blkcnt == 0)
369 		return 0;
370 
371 	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);
372 
373 	if (desc->devnum == BLK_MTD_NAND) {
374 #if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
375 		mtd = dev_get_priv(udev->parent);
376 		if (!mtd)
377 			return 0;
378 
379 		ret = nand_read_skip_bad(mtd, off, &rwsize,
380 					 NULL, mtd->size,
381 					 (u_char *)(dst));
382 #else
383 		ret = mtd_map_read(mtd, off, &rwsize,
384 				   NULL, mtd->size,
385 				   (u_char *)(dst));
386 #endif
387 		if (!ret)
388 			return blkcnt;
389 		else
390 			return 0;
391 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
392 		ret = mtd_map_read(mtd, off, &rwsize,
393 				   NULL, mtd->size,
394 				   (u_char *)(dst));
395 		if (!ret)
396 			return blkcnt;
397 		else
398 			return 0;
399 	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
400 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
401 		size_t retlen_nor;
402 
403 		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
404 		if (retlen_nor == rwsize)
405 			return blkcnt;
406 		else
407 #endif
408 			return 0;
409 	} else {
410 		return 0;
411 	}
412 }
413 
414 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
415 		 lbaint_t blkcnt, const void *src)
416 {
417 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
418 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
419 	loff_t off = (loff_t)(start * 512);
420 	size_t rwsize = blkcnt * 512;
421 #endif
422 	struct mtd_info *mtd;
423 	int ret = 0;
424 
425 	if (!desc)
426 		return ret;
427 
428 	mtd = desc->bdev->priv;
429 	if (!mtd)
430 		return 0;
431 
432 	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
433 
434 	if (blkcnt == 0)
435 		return 0;
436 
437 	if (desc->devnum == BLK_MTD_NAND ||
438 	    desc->devnum == BLK_MTD_SPI_NAND ||
439 	    desc->devnum == BLK_MTD_SPI_NOR) {
440 		ret = mtd_map_write(mtd, off, &rwsize,
441 				    NULL, mtd->size,
442 				    (u_char *)(src), 0);
443 		if (!ret)
444 			return blkcnt;
445 		else
446 			return 0;
447 	} else {
448 		return 0;
449 	}
450 
451 	return 0;
452 }
453 
454 ulong mtd_derase(struct udevice *udev, lbaint_t start,
455 		 lbaint_t blkcnt)
456 {
457 	/* Not implemented */
458 	return 0;
459 }
460 
461 static int mtd_blk_probe(struct udevice *udev)
462 {
463 	struct mtd_info *mtd = dev_get_uclass_priv(udev->parent);
464 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
465 	int ret, i;
466 
467 	desc->bdev->priv = mtd;
468 	sprintf(desc->vendor, "0x%.4x", 0x2207);
469 	memcpy(desc->product, mtd->name, strlen(mtd->name));
470 	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
471 	if (mtd->type == MTD_NANDFLASH) {
472 		if (desc->devnum == BLK_MTD_NAND)
473 			mtd = dev_get_priv(udev->parent);
474 		/*
475 		 * Find the first useful block in the end,
476 		 * and it is the end lba of the nand storage.
477 		 */
478 		for (i = 0; i < (mtd->size / mtd->erasesize); i++) {
479 			ret =  mtd_block_isbad(mtd,
480 					       mtd->size - mtd->erasesize * (i + 1));
481 			if (!ret) {
482 				desc->lba = (mtd->size >> 9) -
483 					(mtd->erasesize >> 9) * i;
484 				break;
485 			}
486 		}
487 	} else {
488 		desc->lba = mtd->size >> 9;
489 	}
490 
491 	debug("MTD: desc->lba is %lx\n", desc->lba);
492 
493 	return 0;
494 }
495 
/*
 * Block operations for the MTD block uclass; write/erase are left out
 * of SPL builds to keep the image small.
 */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

/* Bind MTD-backed block devices to the generic BLK uclass. */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
510