xref: /OK3568_Linux_fs/kernel/drivers/rkflash/sfc_nor_mtd.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun /* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/kernel.h>
6*4882a593Smuzhiyun #include <linux/mtd/cfi.h>
7*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
8*4882a593Smuzhiyun #include <linux/mtd/partitions.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/string.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "rkflash_blk.h"
13*4882a593Smuzhiyun #include "rkflash_debug.h"
14*4882a593Smuzhiyun 
/*
 * Per-device wrapper binding the low-level SPI NOR driver state to an
 * MTD device.  The mtd member is embedded so mtd_to_priv() can recover
 * this object from the mtd_info pointer MTD callbacks receive.
 */
struct snor_mtd_dev {
	struct SFNOR_DEV *snor;	/* low-level SPI NOR device state */
	struct mutex	*lock; /* to lock this object */
	struct mtd_info mtd;	/* embedded MTD device, registered at init */
	u8 *dma_buf;		/* bounce buffer for flash transfers */
};
21*4882a593Smuzhiyun 
/* Partition table handed to mtd_device_register(); filled during init. */
static struct mtd_partition nor_parts[MAX_PART_COUNT];

/* Byte size of the DMA bounce buffer allocated per device. */
#define SFC_NOR_MTD_DMA_MAX 8192
25*4882a593Smuzhiyun 
mtd_to_priv(struct mtd_info * ptr_mtd)26*4882a593Smuzhiyun static inline struct snor_mtd_dev *mtd_to_priv(struct mtd_info *ptr_mtd)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	return (struct snor_mtd_dev *)((char *)ptr_mtd -
29*4882a593Smuzhiyun 		offsetof(struct snor_mtd_dev, mtd));
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun 
sfc_erase_mtd(struct mtd_info * mtd,struct erase_info * instr)32*4882a593Smuzhiyun static int sfc_erase_mtd(struct mtd_info *mtd, struct erase_info *instr)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	int ret;
35*4882a593Smuzhiyun 	struct snor_mtd_dev *p_dev = mtd_to_priv(mtd);
36*4882a593Smuzhiyun 	u32 addr, len;
37*4882a593Smuzhiyun 	u32 rem;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	addr = instr->addr;
40*4882a593Smuzhiyun 	len = instr->len;
41*4882a593Smuzhiyun 	rkflash_print_dio("%s addr= %x len= %x\n",
42*4882a593Smuzhiyun 			  __func__, addr, len);
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	if ((addr + len) > mtd->size)
45*4882a593Smuzhiyun 		return -EINVAL;
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	div_u64_rem(instr->len, mtd->erasesize, &rem);
48*4882a593Smuzhiyun 	if (rem)
49*4882a593Smuzhiyun 		return -EINVAL;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	mutex_lock(p_dev->lock);
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	if (len == p_dev->mtd.size) {
54*4882a593Smuzhiyun 		ret = snor_erase(p_dev->snor, 0, ERASE_CHIP);
55*4882a593Smuzhiyun 		if (ret) {
56*4882a593Smuzhiyun 			rkflash_print_error("snor_erase CHIP 0x%x ret=%d\n",
57*4882a593Smuzhiyun 					    addr, ret);
58*4882a593Smuzhiyun 			instr->fail_addr = addr;
59*4882a593Smuzhiyun 			mutex_unlock(p_dev->lock);
60*4882a593Smuzhiyun 			return -EIO;
61*4882a593Smuzhiyun 		}
62*4882a593Smuzhiyun 	} else {
63*4882a593Smuzhiyun 		while (len > 0) {
64*4882a593Smuzhiyun 			ret = snor_erase(p_dev->snor, addr, ERASE_BLOCK64K);
65*4882a593Smuzhiyun 			if (ret) {
66*4882a593Smuzhiyun 				rkflash_print_error("snor_erase 0x%x ret=%d\n",
67*4882a593Smuzhiyun 						    addr, ret);
68*4882a593Smuzhiyun 				instr->fail_addr = addr;
69*4882a593Smuzhiyun 				mutex_unlock(p_dev->lock);
70*4882a593Smuzhiyun 				return -EIO;
71*4882a593Smuzhiyun 			}
72*4882a593Smuzhiyun 			addr += mtd->erasesize;
73*4882a593Smuzhiyun 			len -= mtd->erasesize;
74*4882a593Smuzhiyun 		}
75*4882a593Smuzhiyun 	}
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	mutex_unlock(p_dev->lock);
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	return 0;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun 
/*
 * MTD write callback.
 *
 * Programs @len bytes from @buf to flash offset @to, splitting the
 * transfer so no program cycle crosses a NOR page boundary.  Data is
 * staged through the per-device DMA bounce buffer; short tail chunks
 * are padded with 0xFF (erased state) up to a 4-byte boundary so the
 * controller transfer length stays word aligned.
 *
 * On success *retlen == len and 0 is returned; on a program failure
 * *retlen reflects the bytes written so far and the SFC status code is
 * returned (NOTE(review): callers appear to treat nonzero as failure —
 * confirm an errno conversion is not expected here).
 */
static int sfc_write_mtd(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	int status;
	u32 addr, size, chunk, padding;
	u32 page_align;
	struct snor_mtd_dev *p_dev = mtd_to_priv(mtd);

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, to, (u32)len);
	if ((to + len) > mtd->size)
		return -EINVAL;

	mutex_lock(p_dev->lock);

	addr = to;
	size = len;

	while (size > 0) {
		/* Limit the chunk so it never crosses a page boundary. */
		page_align = addr & (NOR_PAGE_SIZE - 1);
		chunk = size;
		if (chunk > (NOR_PAGE_SIZE - page_align))
			chunk = NOR_PAGE_SIZE - page_align;
		memcpy(p_dev->dma_buf, buf, chunk);
		padding = 0;
		if (chunk < NOR_PAGE_SIZE) {
			/*
			 * Round up to 4-byte alignment.  ~3u is correct for
			 * any chunk size, unlike the old 0xFFFC mask which
			 * silently assumed chunk < 64KB.
			 */
			padding = ((chunk + 3) & ~3u) - chunk;
			memset(p_dev->dma_buf + chunk, 0xFF, padding);
		}
		status = snor_prog_page(p_dev->snor, addr, p_dev->dma_buf,
					chunk + padding);
		if (status != SFC_OK) {
			rkflash_print_error("snor_prog_page %x ret= %d\n",
					    addr, status);
			/* Report how much made it to flash before failing. */
			*retlen = len - size;
			mutex_unlock(p_dev->lock);
			return status;
		}

		size -= chunk;
		addr += chunk;
		buf += chunk;
	}
	*retlen = len;
	mutex_unlock(p_dev->lock);

	return 0;
}
130*4882a593Smuzhiyun 
/*
 * MTD read callback: copy @len bytes at flash offset @from into @buf.
 * Data is bounced through the driver's DMA buffer in chunks of at most
 * SFC_NOR_MTD_DMA_MAX bytes.  On success *retlen == len and 0 is
 * returned; on failure *retlen holds the bytes read so far and the SFC
 * status code is returned.
 */
static int sfc_read_mtd(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct snor_mtd_dev *p_dev = mtd_to_priv(mtd);
	u8 *dst = (u8 *)buf;
	u32 offset, remaining;
	int ret = SFC_OK;

	rkflash_print_dio("%s addr= %llx len= %x\n", __func__, from, (u32)len);
	if ((from + len) > mtd->size)
		return -EINVAL;

	mutex_lock(p_dev->lock);

	offset = from;
	remaining = len;

	while (remaining > 0) {
		u32 step = remaining;

		if (step > SFC_NOR_MTD_DMA_MAX)
			step = SFC_NOR_MTD_DMA_MAX;
		ret = snor_read_data(p_dev->snor, offset, p_dev->dma_buf, step);
		if (ret != SFC_OK) {
			rkflash_print_error("snor_read_data %x ret=%d\n", offset, ret);
			*retlen = len - remaining;
			mutex_unlock(p_dev->lock);
			return ret;
		}
		memcpy(dst, p_dev->dma_buf, step);
		remaining -= step;
		offset += step;
		dst += step;
	}

	*retlen = len;
	mutex_unlock(p_dev->lock);
	return 0;
}
167*4882a593Smuzhiyun 
/*
 * If rk_partition is not supported and the layout is fixed, populate
 * struct def_nor_part by adding entries like the following example:
 *	{"u-boot", 0x1000 * 512, 0x2000 * 512},
 * Note.
 * 1. New partition format {name, size, offset}
 * 2. Unit: Byte
 * 3. The last partition's 'size' can be set to 0xFFFFFFFF to use all
 *    of the remaining space.
 */
struct mtd_partition def_nor_part[] = {};
178*4882a593Smuzhiyun 
sfc_nor_mtd_init(struct SFNOR_DEV * p_dev,struct mutex * lock)179*4882a593Smuzhiyun int sfc_nor_mtd_init(struct SFNOR_DEV *p_dev, struct mutex *lock)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun 	int ret, i, part_num = 0;
182*4882a593Smuzhiyun 	int capacity;
183*4882a593Smuzhiyun 	struct STRUCT_PART_INFO *g_part;  /* size 2KB */
184*4882a593Smuzhiyun 	struct snor_mtd_dev *priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	if (!priv_dev) {
187*4882a593Smuzhiyun 		rkflash_print_error("%s %d alloc failed\n", __func__, __LINE__);
188*4882a593Smuzhiyun 		return -ENOMEM;
189*4882a593Smuzhiyun 	}
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	priv_dev->snor = p_dev;
192*4882a593Smuzhiyun 	capacity = p_dev->capacity;
193*4882a593Smuzhiyun 	priv_dev->mtd.name = "sfc_nor";
194*4882a593Smuzhiyun 	priv_dev->mtd.type = MTD_NORFLASH;
195*4882a593Smuzhiyun 	priv_dev->mtd.writesize = 1;
196*4882a593Smuzhiyun 	priv_dev->mtd.flags = MTD_CAP_NORFLASH;
197*4882a593Smuzhiyun 	/* see snor_write */
198*4882a593Smuzhiyun 	priv_dev->mtd.size = (u64)capacity << 9;
199*4882a593Smuzhiyun 	priv_dev->mtd._erase = sfc_erase_mtd;
200*4882a593Smuzhiyun 	priv_dev->mtd._read = sfc_read_mtd;
201*4882a593Smuzhiyun 	priv_dev->mtd._write = sfc_write_mtd;
202*4882a593Smuzhiyun 	priv_dev->mtd.erasesize = p_dev->blk_size << 9;
203*4882a593Smuzhiyun 	priv_dev->mtd.writebufsize = NOR_PAGE_SIZE;
204*4882a593Smuzhiyun 	priv_dev->lock = lock;
205*4882a593Smuzhiyun 	priv_dev->dma_buf = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32, get_order(SFC_NOR_MTD_DMA_MAX));
206*4882a593Smuzhiyun 	if (!priv_dev->dma_buf) {
207*4882a593Smuzhiyun 		rkflash_print_error("%s %d alloc failed\n", __func__, __LINE__);
208*4882a593Smuzhiyun 		ret = -ENOMEM;
209*4882a593Smuzhiyun 		goto error_out;
210*4882a593Smuzhiyun 	}
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	g_part = kmalloc(sizeof(*g_part), GFP_KERNEL);
213*4882a593Smuzhiyun 	if (!g_part) {
214*4882a593Smuzhiyun 		ret = -ENOMEM;
215*4882a593Smuzhiyun 		goto error_out;
216*4882a593Smuzhiyun 	}
217*4882a593Smuzhiyun 	part_num = 0;
218*4882a593Smuzhiyun 	if (snor_read(p_dev, 0, 4, g_part) == 4) {
219*4882a593Smuzhiyun 		if (g_part->hdr.ui_fw_tag == RK_PARTITION_TAG) {
220*4882a593Smuzhiyun 			part_num = g_part->hdr.ui_part_entry_count;
221*4882a593Smuzhiyun 			for (i = 0; i < part_num; i++) {
222*4882a593Smuzhiyun 				nor_parts[i].name =
223*4882a593Smuzhiyun 					kstrdup(g_part->part[i].sz_name,
224*4882a593Smuzhiyun 						GFP_KERNEL);
225*4882a593Smuzhiyun 				if (g_part->part[i].ui_pt_sz == 0xFFFFFFFF)
226*4882a593Smuzhiyun 					g_part->part[i].ui_pt_sz = capacity -
227*4882a593Smuzhiyun 						g_part->part[i].ui_pt_off;
228*4882a593Smuzhiyun 				nor_parts[i].offset =
229*4882a593Smuzhiyun 					(u64)g_part->part[i].ui_pt_off << 9;
230*4882a593Smuzhiyun 				nor_parts[i].size =
231*4882a593Smuzhiyun 					(u64)g_part->part[i].ui_pt_sz << 9;
232*4882a593Smuzhiyun 				nor_parts[i].mask_flags = 0;
233*4882a593Smuzhiyun 			}
234*4882a593Smuzhiyun 		} else {
235*4882a593Smuzhiyun 			part_num = ARRAY_SIZE(def_nor_part);
236*4882a593Smuzhiyun 			for (i = 0; i < part_num; i++) {
237*4882a593Smuzhiyun 				nor_parts[i].name =
238*4882a593Smuzhiyun 					kstrdup(def_nor_part[i].name,
239*4882a593Smuzhiyun 						GFP_KERNEL);
240*4882a593Smuzhiyun 				if (def_nor_part[i].size == 0xFFFFFFFF)
241*4882a593Smuzhiyun 					def_nor_part[i].size = (capacity << 9) -
242*4882a593Smuzhiyun 						def_nor_part[i].offset;
243*4882a593Smuzhiyun 				nor_parts[i].offset =
244*4882a593Smuzhiyun 					def_nor_part[i].offset;
245*4882a593Smuzhiyun 				nor_parts[i].size =
246*4882a593Smuzhiyun 					def_nor_part[i].size;
247*4882a593Smuzhiyun 				nor_parts[i].mask_flags = 0;
248*4882a593Smuzhiyun 			}
249*4882a593Smuzhiyun 		}
250*4882a593Smuzhiyun 	}
251*4882a593Smuzhiyun 	kfree(g_part);
252*4882a593Smuzhiyun 	ret = mtd_device_register(&priv_dev->mtd, nor_parts, part_num);
253*4882a593Smuzhiyun 	if (ret) {
254*4882a593Smuzhiyun 		pr_err("%s register mtd fail %d\n", __func__, ret);
255*4882a593Smuzhiyun 	} else {
256*4882a593Smuzhiyun 		pr_info("%s register mtd succuss\n", __func__);
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 		return 0;
259*4882a593Smuzhiyun 	}
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	free_pages((unsigned long)priv_dev->dma_buf, get_order(SFC_NOR_MTD_DMA_MAX));
262*4882a593Smuzhiyun error_out:
263*4882a593Smuzhiyun 	kfree(priv_dev);
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	return ret;
266*4882a593Smuzhiyun }
267