xref: /rk3399_rockchip-uboot/drivers/mtd/mtd_blk.c (revision 2e93c98a37ccc8f66d17cc6fd4ca12a68bb4ac4f)
1 /*
2  * (C) Copyright 2019 Rockchip Electronics Co., Ltd
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 
7 #include <common.h>
8 #include <blk.h>
9 #include <boot_rkimg.h>
10 #include <dm.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <nand.h>
14 #include <part.h>
15 #include <dm/device-internal.h>
16 
/* Prefix of the kernel-style mtdparts command line built by mtd_part_parse() */
#define MTD_PART_NAND_HEAD		"mtdparts="
/* Size of the heap buffer mtd_part_parse() returns (whole mtdparts string) */
#define MTD_PART_INFO_MAX_SIZE		512
/* Size of the scratch buffer holding a single "0x..@0x..(name)" entry */
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

/* Map-table entry states: block not scanned yet / no good block available */
#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

/*
 * Per-eraseblock logical->physical remap table, indexed by logical block
 * number.  Allocated lazily by mtd_blk_map_table_init() and consulted by
 * get_mtd_blk_map_address(); stays NULL until a partition is mapped.
 */
static int *mtd_map_blk_table;
25 
26 int mtd_blk_map_table_init(struct blk_desc *desc,
27 			   loff_t offset,
28 			   size_t length)
29 {
30 	u32 blk_total, blk_begin, blk_cnt;
31 	struct mtd_info *mtd = NULL;
32 	int i, j;
33 
34 	if (!desc)
35 		return -ENODEV;
36 
37 	if (desc->devnum == BLK_MTD_NAND) {
38 #if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
39 		mtd = dev_get_priv(desc->bdev->parent);
40 #endif
41 	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
42 #if defined(CONFIG_MTD_SPI_NAND) && !defined(CONFIG_SPL_BUILD)
43 		mtd = desc->bdev->priv;
44 #endif
45 	}
46 
47 	if (!mtd) {
48 		return -ENODEV;
49 	} else {
50 		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
51 		if (!mtd_map_blk_table) {
52 			mtd_map_blk_table = (int *)malloc(blk_total * 4);
53 			memset(mtd_map_blk_table, MTD_BLK_TABLE_BLOCK_UNKNOWN,
54 			       blk_total * 4);
55 		}
56 
57 		blk_begin = (u32)offset >> mtd->erasesize_shift;
58 		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length) >> mtd->erasesize_shift);
59 		if ((blk_begin + blk_cnt) > blk_total)
60 			blk_cnt = blk_total - blk_begin;
61 
62 		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
63 			return 0;
64 
65 		j = 0;
66 		 /* should not across blk_cnt */
67 		for (i = 0; i < blk_cnt; i++) {
68 			if (j >= blk_cnt)
69 				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
70 			for (; j < blk_cnt; j++) {
71 				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
72 					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
73 					j++;
74 					if (j == blk_cnt)
75 						j++;
76 					break;
77 				}
78 			}
79 		}
80 
81 		return 0;
82 	}
83 }
84 
85 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
86 {
87 	bool mapped;
88 	loff_t offset = *off;
89 	size_t block_offset = offset & (mtd->erasesize - 1);
90 
91 	mapped = false;
92 	if (!mtd_map_blk_table ||
93 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
94 	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
95 	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
96 	    0xffffffff)
97 		return mapped;
98 
99 	mapped = true;
100 	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
101 		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);
102 
103 	return mapped;
104 }
105 
106 void mtd_blk_map_partitions(struct blk_desc *desc)
107 {
108 	disk_partition_t info;
109 	int i, ret;
110 
111 	if (!desc)
112 		return;
113 
114 	if (desc->if_type != IF_TYPE_MTD)
115 		return;
116 
117 	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
118 		ret = part_get_info(desc, i, &info);
119 		if (ret != 0)
120 			continue;
121 
122 		if (mtd_blk_map_table_init(desc,
123 					   info.start << 9,
124 					   info.size << 9)) {
125 			pr_debug("mtd block map table fail\n");
126 		}
127 	}
128 }
129 
/*
 * Read *length bytes starting at @offset, skipping bad blocks on the fly,
 * or following the remap table built by mtd_blk_map_table_init() when one
 * covers this region.
 *
 * @mtd:    mtd device to read from
 * @offset: byte offset to start reading at
 * @length: in: bytes requested; out: reduced by the unread remainder on error
 * @actual: unused (signature mirrors nand_read_skip_bad())
 * @lim:    unused (signature mirrors nand_read_skip_bad())
 * @buffer: destination, must hold at least *length bytes
 *
 * Return: 0 on success, or the mtd_read() error code (-EUCLEAN corrected
 * bitflips are tolerated).  Note: running past mtd->size returns 0 without
 * adjusting *length.
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		/* Silently stop at the end of the device */
		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		/* No remap entry: fall back to skipping bad blocks inline */
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Read at most up to the end of the current erase block */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}
178 
179 static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
180 					size_t *length, size_t *actual,
181 					loff_t lim, u_char *buffer, int flags)
182 {
183 	int rval = 0, blocksize;
184 	size_t left_to_write = *length;
185 	u_char *p_buffer = buffer;
186 	struct erase_info ei;
187 
188 	blocksize = mtd->erasesize;
189 
190 	/*
191 	 * nand_write() handles unaligned, partial page writes.
192 	 *
193 	 * We allow length to be unaligned, for convenience in
194 	 * using the $filesize variable.
195 	 *
196 	 * However, starting at an unaligned offset makes the
197 	 * semantics of bad block skipping ambiguous (really,
198 	 * you should only start a block skipping access at a
199 	 * partition boundary).  So don't try to handle that.
200 	 */
201 	if ((offset & (mtd->writesize - 1)) != 0) {
202 		printf("Attempt to write non page-aligned data\n");
203 		*length = 0;
204 		return -EINVAL;
205 	}
206 
207 	while (left_to_write > 0) {
208 		size_t block_offset = offset & (mtd->erasesize - 1);
209 		size_t write_size, truncated_write_size;
210 		loff_t mapped_offset;
211 
212 		if (offset >= mtd->size)
213 			return 0;
214 
215 		mapped_offset = offset;
216 		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
217 			if (mtd_block_isbad(mtd, mapped_offset &
218 					    ~(mtd->erasesize - 1))) {
219 				printf("Skipping bad block 0x%08llx\n",
220 				       offset & ~(mtd->erasesize - 1));
221 				offset += mtd->erasesize - block_offset;
222 				continue;
223 			}
224 		}
225 
226 		if (!(mapped_offset & mtd->erasesize_mask)) {
227 			memset(&ei, 0, sizeof(struct erase_info));
228 			ei.addr = mapped_offset;
229 			ei.len  = mtd->erasesize;
230 			rval = mtd_erase(mtd, &ei);
231 			if (rval) {
232 				pr_info("error %d while erasing %llx\n", rval,
233 					mapped_offset);
234 				return rval;
235 			}
236 		}
237 
238 		if (left_to_write < (blocksize - block_offset))
239 			write_size = left_to_write;
240 		else
241 			write_size = blocksize - block_offset;
242 
243 		truncated_write_size = write_size;
244 		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
245 				 (size_t *)(&truncated_write_size), p_buffer);
246 
247 		offset += write_size;
248 		p_buffer += write_size;
249 
250 		if (rval != 0) {
251 			printf("NAND write to offset %llx failed %d\n",
252 			       offset, rval);
253 			*length -= left_to_write;
254 			return rval;
255 		}
256 
257 		left_to_write -= write_size;
258 	}
259 
260 	return 0;
261 }
262 
263 char *mtd_part_parse(void)
264 {
265 	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
266 	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
267 	struct blk_desc *dev_desc;
268 	disk_partition_t info;
269 	char *mtd_part_info_p;
270 	struct mtd_info *mtd;
271 	char *mtd_part_info;
272 	int ret;
273 	int p;
274 
275 	dev_desc = rockchip_get_bootdev();
276 	if (!dev_desc)
277 		return NULL;
278 
279 	mtd = (struct mtd_info *)dev_desc->bdev->priv;
280 	if (!mtd)
281 		return NULL;
282 
283 	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
284 	if (!mtd_part_info) {
285 		printf("%s: Fail to malloc!", __func__);
286 		return NULL;
287 	}
288 
289 	mtd_part_info_p = mtd_part_info;
290 	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
291 		 MTD_PART_NAND_HEAD,
292 		 dev_desc->product);
293 	data_len -= strlen(mtd_part_info_p);
294 	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);
295 
296 	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
297 		ret = part_get_info(dev_desc, p, &info);
298 		if (ret)
299 			break;
300 
301 		debug("name is %s, start addr is %x\n", info.name,
302 		      (int)(size_t)info.start);
303 
304 		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
305 			 (int)(size_t)info.size << 9,
306 			 (int)(size_t)info.start << 9,
307 			 info.name);
308 		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
309 			 "0x%x@0x%x(%s)",
310 			 (int)(size_t)info.size << 9,
311 			 (int)(size_t)info.start << 9,
312 			 info.name);
313 		strcat(mtd_part_info, ",");
314 		if (part_get_info(dev_desc, p + 1, &info)) {
315 			/* Nand flash is erased by block and gpt table just
316 			 * resserve 33 sectors for the last partition. This
317 			 * will erase the backup gpt table by user program,
318 			 * so reserve one block.
319 			 */
320 			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
321 				 (int)(size_t)(info.size -
322 				 (info.size - 1) %
323 				 (mtd->erasesize >> 9) - 1) << 9,
324 				 (int)(size_t)info.start << 9,
325 				 info.name);
326 			break;
327 		}
328 		length = strlen(mtd_part_info_temp);
329 		data_len -= length;
330 		mtd_part_info_p = mtd_part_info_p + length + 1;
331 		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
332 	}
333 
334 	return mtd_part_info;
335 }
336 
/*
 * blk_ops read hook.  @start and @blkcnt are in 512-byte sectors.
 * Dispatches on the device number: raw NAND, SPI NAND and SPI NOR each
 * use a different read path.
 *
 * Return: number of sectors read (blkcnt) on success, 0 on any error or
 * unknown device type.
 */
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
		/* Raw NAND: the real mtd_info lives in the parent device */
		mtd = dev_get_priv(udev->parent);
		if (!mtd)
			return 0;

		ret = nand_read_skip_bad(mtd, off, &rwsize,
					 NULL, mtd->size,
					 (u_char *)(dst));
#else
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
#endif
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		size_t retlen_nor;

		/* Plain mtd_read(); success iff all requested bytes returned.
		 * NOTE(review): mtd_read()'s return code is not checked here */
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}
400 
401 ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
402 		 lbaint_t blkcnt, const void *src)
403 {
404 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
405 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
406 	loff_t off = (loff_t)(start * 512);
407 	size_t rwsize = blkcnt * 512;
408 #endif
409 	struct mtd_info *mtd;
410 	int ret = 0;
411 
412 	if (!desc)
413 		return ret;
414 
415 	mtd = desc->bdev->priv;
416 	if (!mtd)
417 		return 0;
418 
419 	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);
420 
421 	if (blkcnt == 0)
422 		return 0;
423 
424 	if (desc->devnum == BLK_MTD_NAND ||
425 	    desc->devnum == BLK_MTD_SPI_NAND ||
426 	    desc->devnum == BLK_MTD_SPI_NOR) {
427 		ret = mtd_map_write(mtd, off, &rwsize,
428 				    NULL, mtd->size,
429 				    (u_char *)(src), 0);
430 		if (!ret)
431 			return blkcnt;
432 		else
433 			return 0;
434 	} else {
435 		return 0;
436 	}
437 
438 	return 0;
439 }
440 
/*
 * blk_ops erase hook.  Erase is not supported by this driver; always
 * reports 0 blocks erased (writes erase implicitly via mtd_map_write()).
 */
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	/* Not implemented */
	return 0;
}
447 
448 static int mtd_blk_probe(struct udevice *udev)
449 {
450 	struct mtd_info *mtd = dev_get_uclass_priv(udev->parent);
451 	struct blk_desc *desc = dev_get_uclass_platdata(udev);
452 	int ret, i;
453 
454 	desc->bdev->priv = mtd;
455 	sprintf(desc->vendor, "0x%.4x", 0x2207);
456 	memcpy(desc->product, mtd->name, strlen(mtd->name));
457 	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
458 	if (mtd->type == MTD_NANDFLASH) {
459 		if (desc->devnum == BLK_MTD_NAND)
460 			mtd = dev_get_priv(udev->parent);
461 		/*
462 		 * Find the first useful block in the end,
463 		 * and it is the end lba of the nand storage.
464 		 */
465 		for (i = 0; i < (mtd->size / mtd->erasesize); i++) {
466 			ret =  mtd_block_isbad(mtd,
467 					       mtd->size - mtd->erasesize * (i + 1));
468 			if (!ret) {
469 				desc->lba = (mtd->size >> 9) -
470 					(mtd->erasesize >> 9) * i;
471 				break;
472 			}
473 		}
474 	} else {
475 		desc->lba = mtd->size >> 9;
476 	}
477 
478 	debug("MTD: desc->lba is %lx\n", desc->lba);
479 
480 	return 0;
481 }
482 
/* Block-device operations; write/erase are compiled out of SPL builds */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};
490 
/* Register the MTD block driver with the BLK uclass */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
497