/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <linux/log2.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;

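/*
 * Build the logical-to-physical erase block map for the region
 * [offset, offset + length) of the MTD device behind @desc. The table
 * covers the whole device and is allocated once, on first use. Each
 * logical block in the region is assigned the next good physical
 * block; once the good blocks are used up, the remaining entries are
 * marked MTD_BLK_TABLE_BLOCK_SHIFT. Returns 0 on success or a
 * negative error code.
 */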
int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd) {
		return -ENODEV;
	} else {
		blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
		if (!mtd_map_blk_table) {
			mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
			if (!mtd_map_blk_table)
				return -ENOMEM;
			for (i = 0; i < blk_total; i++)
				mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
		}

		blk_begin = (u32)offset >> mtd->erasesize_shift;
		blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
			mtd->erasesize - 1) >> mtd->erasesize_shift);
		if (blk_begin >= blk_total) {
			pr_err("map table blk begin[%d] overflow\n", blk_begin);
			return -EINVAL;
		}
		if ((blk_begin + blk_cnt) > blk_total)
			blk_cnt = blk_total - blk_begin;

		if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
			return 0;

		j = 0;
		/* must not go beyond blk_cnt */
		for (i = 0; i < blk_cnt; i++) {
			/* All good blocks consumed: mark the rest shifted out */
			if (j >= blk_cnt)
				mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
			for (; j < blk_cnt; j++) {
				if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
					mtd_map_blk_table[blk_begin + i] = blk_begin + j;
					j++;
					if (j == blk_cnt)
						j++;
					break;
				}
			}
		}

		return 0;
	}
}

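/*
 * Translate *off through the bad block map table. Returns true and
 * rewrites *off with the mapped address when a valid table entry
 * exists, false (leaving *off untouched) otherwise.
 */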
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

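/*
 * Walk the partition table of an MTD block device and initialize the
 * bad block map for every partition found.
 */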
void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			break;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}

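/*
 * Initialize the bad block map for a FIT image stored at @sector,
 * using the /totalsize node of the FIT (plus one spare erase block)
 * to size the region.
 */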
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}

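/*
 * Read *length bytes from @offset, translating through the bad block
 * map when one exists and skipping bad blocks on the fly otherwise.
 * On failure, *length is reduced to the number of bytes actually read.
 */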
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset       += read_length;
		p_buffer     += read_length;
	}

	return 0;
}

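/*
 * Erase-then-write *length bytes at @offset with the same bad block
 * handling as mtd_map_read(). @offset must be page aligned; each erase
 * block is erased when the write reaches its start.
 */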
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary).  So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len  = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 &truncated_write_size, p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

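/*
 * Erase a block-aligned region, translating through the bad block map
 * when present and skipping bad or reserved blocks otherwise.
 */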
static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos=%llx, len=%llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len  = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}

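/*
 * Build an "mtdparts=..." style string from the partition table of the
 * boot device. The last partition is shrunk by up to one erase block
 * so that rewriting it cannot clobber the backup GPT. Returns a
 * heap-allocated string owned by the caller, or NULL on error.
 */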
char *mtd_part_parse(struct blk_desc *dev_desc)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

#ifndef CONFIG_SPL_BUILD
	dev_desc = rockchip_get_bootdev();
#endif
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* A partition with the "grow" tag in the parameter will be resized */
			if ((info.size + info.start + 64) >= dev_desc->lba) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/* The kernel erases NOR in 64KB blocks, but the
					 * GPT reserves just 33 sectors after the last
					 * partition; a user program erasing the last
					 * block would wipe the backup GPT table, so
					 * reserve one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/* NAND flash is erased by whole blocks, but the
					 * GPT reserves just 33 sectors after the last
					 * partition; a user program erasing the last
					 * block would wipe the backup GPT table, so
					 * reserve one erase block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

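/*
 * Block read op, in 512-byte sectors: mtd_map_read() for (SPI) NAND,
 * a raw mtd_read() for SPI NOR. Returns the number of sectors read,
 * or 0 on error.
 */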
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

#if CONFIG_IS_ENABLED(MTD_WRITE)
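/*
 * Block write op, in 512-byte sectors. With BLK_MTD_CONT_WRITE the
 * buffer is written straight through; otherwise the surrounding erase
 * blocks are read, patched and written back so that data outside the
 * request is preserved. Returns the number of sectors written, or 0
 * on error.
 */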
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, align_off;
			size_t rwsize_aligned;
			u8 *p_buf;

			align_off = off & mtd->erasesize_mask;
			off_aligned = off - align_off;
			rwsize_aligned = rwsize + align_off;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
				~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + align_off, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}

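/*
 * Block erase op, in 512-byte sectors. Returns the number of sectors
 * erased, or propagates the mtd_map_erase() error code.
 */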
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return blkcnt;
}
#endif

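/*
 * Probe callback: bind the parent MTD device to the block descriptor,
 * derive the erase/write size shifts and masks, fill in the
 * identification strings, and compute the usable capacity. For NAND,
 * the end LBA is set at the last good block below the BBT area.
 */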
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	/* Fill mtd devices information */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the first good block from the end; it marks the
		 * end LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					(mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};