/*
 * (C) Copyright 2008 Semihalf
 *
 * Written by: Piotr Ziecik <kosmo@semihalf.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <flash.h>
#include <malloc.h>

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>
#include <mtd/cfi_flash.h>

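/* One MTD device structure and device name per CFI flash bank */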
static struct mtd_info cfi_mtd_info[CFI_MAX_FLASH_BANKS];
static char cfi_mtd_names[CFI_MAX_FLASH_BANKS][16];
#ifdef CONFIG_MTD_CONCAT
static char c_mtd_name[16];
#endif

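/*
 * Erase one or more complete flash sectors. The MTD offset/length pair
 * is translated into first/last CFI sector numbers; requests that are
 * not aligned to sector boundaries are rejected with -EINVAL.
 */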
static int cfi_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	flash_info_t *fi = mtd->priv;
	size_t a_start = fi->start[0] + instr->addr;
	size_t a_end = a_start + instr->len;
	int s_first = -1;
	int s_last = -1;
	int error, sect;

	for (sect = 0; sect < fi->sector_count; sect++) {
		if (a_start == fi->start[sect])
			s_first = sect;

		if (sect < fi->sector_count - 1) {
			if (a_end == fi->start[sect + 1]) {
				s_last = sect;
				break;
			}
		} else {
			s_last = sect;
			break;
		}
	}

	if (s_first >= 0 && s_first <= s_last) {
		instr->state = MTD_ERASING;

		flash_set_verbose(0);
		error = flash_erase(fi, s_first, s_last);
		flash_set_verbose(1);

		if (error) {
			instr->state = MTD_ERASE_FAILED;
			return -EIO;
		}

		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}

	return -EINVAL;
}

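/*
 * Read from the memory-mapped NOR flash. No flash commands are needed,
 * so this is a plain memcpy() from the bank base address plus offset.
 */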
static int cfi_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	flash_info_t *fi = mtd->priv;
	u_char *f = (u_char *)(fi->start[0]) + from;

	memcpy(buf, f, len);
	*retlen = len;

	return 0;
}

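/*
 * Write a buffer to flash through the CFI driver's write_buff(), with
 * driver console output suppressed while the operation is in progress.
 */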
static int cfi_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	flash_info_t *fi = mtd->priv;
	u_long t = fi->start[0] + to;
	int error;

	flash_set_verbose(0);
	error = write_buff(fi, (u_char *)buf, t, len);
	flash_set_verbose(1);

	if (!error) {
		*retlen = len;
		return 0;
	}

	return -EIO;
}

static void cfi_mtd_sync(struct mtd_info *mtd)
{
	/*
	 * This function should wait until all pending operations
	 * finish. However, this driver is fully synchronous, so
	 * this function returns immediately.
	 */
}

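/*
 * Protect (lock) all sectors covered by the given offset/length range
 * via flash_protect().
 */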
static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	flash_info_t *fi = mtd->priv;

	flash_set_verbose(0);
	flash_protect(FLAG_PROTECT_SET, fi->start[0] + ofs,
		      fi->start[0] + ofs + len - 1, fi);
	flash_set_verbose(1);

	return 0;
}

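/*
 * Clear protection on all sectors covered by the given offset/length
 * range.
 */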
static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	flash_info_t *fi = mtd->priv;

	flash_set_verbose(0);
	flash_protect(FLAG_PROTECT_CLEAR, fi->start[0] + ofs,
		      fi->start[0] + ofs + len - 1, fi);
	flash_set_verbose(1);

	return 0;
}

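/*
 * Fill in mtd->erasesize and, for chips with non-uniform sector sizes,
 * the mtd->eraseregions array. Returns 1 if the bank cannot be
 * described (no sectors, or out of memory), 0 otherwise.
 */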
static int cfi_mtd_set_erasesize(struct mtd_info *mtd, flash_info_t *fi)
{
	int sect_size = 0;
	int sect_size_old = 0;
	int sect;
	int regions = 0;
	int numblocks = 0;
	ulong offset;
	ulong base_addr;

	/*
	 * First detect the number of eraseregions so that we can allocate
	 * the array of eraseregions correctly
	 */
	for (sect = 0; sect < fi->sector_count; sect++) {
		if (sect_size_old != flash_sector_size(fi, sect))
			regions++;
		sect_size_old = flash_sector_size(fi, sect);
	}

	switch (regions) {
	case 0:
		return 1;
	case 1:	/* flash has uniform erase size */
		mtd->numeraseregions = 0;
		mtd->erasesize = sect_size_old;
		return 0;
	}

	mtd->numeraseregions = regions;
	mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info) *
				   regions);
	if (!mtd->eraseregions)
		return 1;

	/*
	 * Now detect the largest sector and fill the eraseregions
	 */
	regions = 0;
	base_addr = offset = fi->start[0];
	sect_size_old = flash_sector_size(fi, 0);
	for (sect = 0; sect < fi->sector_count; sect++) {
		if (sect_size_old != flash_sector_size(fi, sect)) {
			mtd->eraseregions[regions].offset = offset - base_addr;
			mtd->eraseregions[regions].erasesize = sect_size_old;
			mtd->eraseregions[regions].numblocks = numblocks;
			/* Now start counting the next eraseregions */
			numblocks = 0;
			regions++;
			offset = fi->start[sect];
		}
		numblocks++;

		/*
		 * Select the largest sector size as erasesize (e.g. for UBI)
		 */
		if (flash_sector_size(fi, sect) > sect_size)
			sect_size = flash_sector_size(fi, sect);

		sect_size_old = flash_sector_size(fi, sect);
	}

	/*
	 * Set the last region
	 */
	mtd->eraseregions[regions].offset = offset - base_addr;
	mtd->eraseregions[regions].erasesize = sect_size_old;
	mtd->eraseregions[regions].numblocks = numblocks;

	mtd->erasesize = sect_size;

	return 0;
}

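/*
 * Register one MTD device per CFI flash bank. When CONFIG_MTD_CONCAT is
 * enabled and more than one bank was registered, an additional
 * concatenated device spanning all banks is created as well.
 */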
int cfi_mtd_init(void)
{
	struct mtd_info *mtd;
	flash_info_t *fi;
	int error, i;
#ifdef CONFIG_MTD_CONCAT
	int devices_found = 0;
	struct mtd_info *mtd_list[CONFIG_SYS_MAX_FLASH_BANKS];
#endif

	for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
		fi = &flash_info[i];
		mtd = &cfi_mtd_info[i];

		memset(mtd, 0, sizeof(struct mtd_info));

		error = cfi_mtd_set_erasesize(mtd, fi);
		if (error)
			continue;

		sprintf(cfi_mtd_names[i], "nor%d", i);
		mtd->name = cfi_mtd_names[i];
		mtd->type = MTD_NORFLASH;
		mtd->flags = MTD_CAP_NORFLASH;
		mtd->size = fi->size;
		mtd->writesize = 1;
		mtd->writebufsize = mtd->writesize;

		mtd->_erase = cfi_mtd_erase;
		mtd->_read = cfi_mtd_read;
		mtd->_write = cfi_mtd_write;
		mtd->_sync = cfi_mtd_sync;
		mtd->_lock = cfi_mtd_lock;
		mtd->_unlock = cfi_mtd_unlock;
		mtd->priv = fi;

		if (add_mtd_device(mtd))
			return -ENOMEM;

#ifdef CONFIG_MTD_CONCAT
		mtd_list[devices_found++] = mtd;
#endif
	}

#ifdef CONFIG_MTD_CONCAT
	if (devices_found > 1) {
		/*
		 * We detected multiple devices. Concatenate them together.
		 */
		sprintf(c_mtd_name, "nor%d", devices_found);
		mtd = mtd_concat_create(mtd_list, devices_found, c_mtd_name);

		if (mtd == NULL)
			return -ENXIO;

		if (add_mtd_device(mtd))
			return -ENOMEM;
	}
#endif /* CONFIG_MTD_CONCAT */

	return 0;
}