xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision cd1c982e9a20e1f221cc1158f81fc40d9d0af0c2)
/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <dm/device-internal.h>

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

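/*
 * Logical-to-physical erase-block map. Entry i holds the index of the
 * good physical erase block that backs logical erase block i, so reads
 * and writes skip bad blocks transparently. MTD_BLK_TABLE_BLOCK_UNKNOWN
 * marks a range that has not been scanned yet; MTD_BLK_TABLE_BLOCK_SHIFT
 * marks a logical block for which no good physical block is left inside
 * the scanned range.
 */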
static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND) && !defined(CONFIG_SPL_BUILD)
		mtd = desc->bdev->priv;
#endif
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = (u32)((offset & mtd->erasesize_mask) + length) >> mtd->erasesize_shift;
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* The range has already been scanned */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The remapping must not cross the end of the range (blk_cnt) */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}
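
/*
 * Illustrative sketch (hypothetical numbers): mapping the first four
 * erase blocks of a device whose physical block 1 is bad,
 *
 *	mtd_blk_map_table_init(desc, 0, 4 * mtd->erasesize);
 *
 * leaves { 0, 2, 3, MTD_BLK_TABLE_BLOCK_SHIFT } in the table: logical
 * blocks slide past the bad block, and the last logical block has no
 * spare good block left inside the range.
 */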

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_SHIFT)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}
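
/*
 * Worked example (hypothetical geometry, 128 KiB erase blocks): an
 * access at *off = 0x25000 lies in logical block 1 with an in-block
 * offset of 0x5000. If the map table holds 2 at index 1, *off is
 * rewritten to 2 * 0x20000 + 0x5000 = 0x45000 and true is returned.
 */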

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		/* Partition table entries are in 512-byte sectors */
		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}
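
/*
 * Sector-to-byte arithmetic used above, as a quick check: a partition
 * entry starting at sector 0x2000 with 0x1000 sectors covers byte
 * offset 0x2000 << 9 = 0x400000 for 0x1000 << 9 = 0x200000 bytes.
 */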

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
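
/*
 * Minimal usage sketch (hypothetical caller, error handling trimmed):
 * read one erase block from the start of the device into a heap buffer.
 *
 *	size_t len = mtd->erasesize;
 *	u_char *buf = malloc(len);
 *
 *	if (buf && !mtd_map_read(mtd, 0, &len, NULL, mtd->size, buf))
 *		printf("read %zu bytes\n", len);
 */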

static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);
		if (rval != 0) {
			/* Check before advancing so the failing offset is reported */
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		offset += write_size;
		p_buffer += write_size;
		left_to_write -= write_size;
	}

	return 0;
}
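
/*
 * Note that each erase block is erased when the mapped offset first
 * crosses its boundary, so callers can stream page-aligned writes
 * without erasing beforehand. A minimal sketch (hypothetical caller,
 * error handling trimmed):
 *
 *	size_t len = mtd->writesize;
 *
 *	if (!mtd_map_write(mtd, 0, &len, NULL, mtd->size, buf, 0))
 *		printf("wrote %zu bytes\n", len);
 */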

char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: failed to allocate part info buffer!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/*
			 * NAND flash is erased by block, but the GPT
			 * reserves only 33 sectors for the backup table
			 * behind the last partition. A user program
			 * writing the last partition could therefore
			 * erase the backup GPT, so shrink the partition
			 * to reserve one whole erase block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
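
/*
 * The returned string follows the Linux mtdparts= command-line format.
 * An illustrative result (hypothetical names and sizes):
 *
 *	mtdparts=spi-nand0:0x100000@0x200000(uboot),0x2000000@0x300000(rootfs)
 *
 * The caller owns the buffer and is expected to free() it.
 */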

ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
		mtd = dev_get_priv(udev->parent);
		if (!mtd)
			return 0;

		ret = nand_read_skip_bad(mtd, off, &rwsize,
					 NULL, mtd->size,
					 (u_char *)(dst));
#else
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
#endif
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		size_t retlen_nor;

		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}
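
/*
 * Block-to-byte example: blk_dread(desc, 64, 4, buf) arrives here as
 * start = 64 and blkcnt = 4, i.e. off = 64 * 512 = 0x8000 and
 * rwsize = 4 * 512 = 0x800 bytes.
 */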

ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_write(mtd, off, &rwsize,
				    NULL, mtd->size,
				    (u_char *)(src), 0);
		if (!ret)
			return blkcnt;
	}

	return 0;
}

ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	/* Not implemented */
	return 0;
}

static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd = dev_get_uclass_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207); /* Rockchip vendor ID */
	strncpy(desc->product, mtd->name, sizeof(desc->product) - 1);
	desc->product[sizeof(desc->product) - 1] = '\0';
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
		if (desc->devnum == BLK_MTD_NAND)
			mtd = dev_get_priv(udev->parent);
		/*
		 * Scan backward for the last good block; it determines
		 * the usable end LBA of the NAND storage.
		 */
		for (i = 0; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
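
/*
 * Minimal usage sketch from elsewhere in U-Boot (assumes this driver is
 * bound and probed; the devnum is illustrative):
 *
 *	struct blk_desc *desc = blk_get_devnum_by_type(IF_TYPE_MTD,
 *						       BLK_MTD_SPI_NAND);
 *
 *	if (desc && blk_dread(desc, 0, 1, buf) == 1)
 *		printf("first sector read\n");
 */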