1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright © 2009 - Maxim Levitsky
4*4882a593Smuzhiyun * SmartMedia/xD translation layer
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/kernel.h>
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/random.h>
10*4882a593Smuzhiyun #include <linux/hdreg.h>
11*4882a593Smuzhiyun #include <linux/kthread.h>
12*4882a593Smuzhiyun #include <linux/freezer.h>
13*4882a593Smuzhiyun #include <linux/sysfs.h>
14*4882a593Smuzhiyun #include <linux/bitops.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/mtd/nand_ecc.h>
17*4882a593Smuzhiyun #include "nand/raw/sm_common.h"
18*4882a593Smuzhiyun #include "sm_ftl.h"
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun static struct workqueue_struct *cache_flush_workqueue;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun static int cache_timeout = 1000;
25*4882a593Smuzhiyun module_param(cache_timeout, int, S_IRUGO);
26*4882a593Smuzhiyun MODULE_PARM_DESC(cache_timeout,
27*4882a593Smuzhiyun "Timeout (in ms) for cache flush (1000 ms default");
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun static int debug;
30*4882a593Smuzhiyun module_param(debug, int, S_IRUGO | S_IWUSR);
31*4882a593Smuzhiyun MODULE_PARM_DESC(debug, "Debug level (0-2)");
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun
/* ------------------- sysfs attributes ---------------------------------- */

/*
 * Couples a sysfs device attribute with the buffer it exposes, so that
 * sm_attr_show() can recover the payload via container_of().
 */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;	/* attribute payload (owned; freed on teardown) */
	int len;	/* number of bytes of ->data to return to userspace */
};
40*4882a593Smuzhiyun
sm_attr_show(struct device * dev,struct device_attribute * attr,char * buf)41*4882a593Smuzhiyun static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
42*4882a593Smuzhiyun char *buf)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun struct sm_sysfs_attribute *sm_attr =
45*4882a593Smuzhiyun container_of(attr, struct sm_sysfs_attribute, dev_attr);
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun strncpy(buf, sm_attr->data, sm_attr->len);
48*4882a593Smuzhiyun return sm_attr->len;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun
#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
/*
 * Build the per-card sysfs attribute group: currently a single read-only
 * "vendor" attribute holding the vendor string copied out of the CIS
 * buffer.  Returns the group, or NULL on any allocation failure (all
 * partially allocated pieces are freed on the error path).
 */
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	/* Duplicate the vendor string; it sits after the CIS proper */
	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;


	/* Create array of pointers to the attributes (NULL-terminated) */
	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
			     GFP_KERNEL);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}
103*4882a593Smuzhiyun
/*
 * Tear down everything sm_create_sysfs_attributes() allocated: each
 * attribute's payload and wrapper struct, the NULL-terminated pointer
 * array, and the attribute group itself.
 */
static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i] ; i++) {

		/* Recover the wrapper from the embedded struct attribute */
		struct device_attribute *dev_attr = container_of(attributes[i],
			struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun /* ----------------------- oob helpers -------------------------------------- */
128*4882a593Smuzhiyun
/*
 * Decode the 10-bit logical block address from its 2-byte on-media
 * encoding.  Returns -2 when either the fixed bit signature or the
 * even-parity check fails.
 */
static int sm_get_lba(uint8_t *lba)
{
	int decoded;

	/* The five high bits of byte 0 must carry the 0b00010 signature */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* Total popcount of both bytes must be even (endianness-neutral) */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	decoded = ((lba[0] & 0x07) << 7) | (lba[1] >> 1);
	return decoded;
}
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun /*
144*4882a593Smuzhiyun * Read LBA associated with block
145*4882a593Smuzhiyun * returns -1, if block is erased
146*4882a593Smuzhiyun * returns -2 if error happens
147*4882a593Smuzhiyun */
sm_read_lba(struct sm_oob * oob)148*4882a593Smuzhiyun static int sm_read_lba(struct sm_oob *oob)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun static const uint32_t erased_pattern[4] = {
151*4882a593Smuzhiyun 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun uint16_t lba_test;
154*4882a593Smuzhiyun int lba;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun /* First test for erased block */
157*4882a593Smuzhiyun if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
158*4882a593Smuzhiyun return -1;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun /* Now check is both copies of the LBA differ too much */
161*4882a593Smuzhiyun lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
162*4882a593Smuzhiyun if (lba_test && !is_power_of_2(lba_test))
163*4882a593Smuzhiyun return -2;
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun /* And read it */
166*4882a593Smuzhiyun lba = sm_get_lba(oob->lba_copy1);
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun if (lba == -2)
169*4882a593Smuzhiyun lba = sm_get_lba(oob->lba_copy2);
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun return lba;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
/*
 * Encode @lba into its 2-byte on-media form and store it in both
 * redundant oob copies.  Layout: 0b00010xxx in byte 0 (top three LBA
 * bits), remaining bits shifted left in byte 1, low bit used as parity.
 */
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t enc[2];

	WARN_ON(lba >= 1000);

	enc[0] = 0x10 | ((lba >> 7) & 0x07);
	enc[1] = (lba << 1) & 0xFF;

	/* Set the parity bit so the total popcount comes out even */
	if (hweight16(*(uint16_t *)enc) & 0x01)
		enc[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = enc[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = enc[1];
}
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun
/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	/* Sanity: sector-aligned byte offset, parts within media bounds */
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	/* block == -1 denotes an unmapped entry in the FTL table */
	if (block == -1)
		return -1;

	/* Zones are laid out SM_MAX_ZONE_SIZE blocks apart on the media */
	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
204*4882a593Smuzhiyun
/* Breaks offset into parts */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			    int *zone, int *block, int *boffset)
{
	u64 offset = loffset;
	/* do_div() divides in place and returns the remainder */
	*boffset = do_div(offset, ftl->block_size);	/* byte within block */
	*block = do_div(offset, ftl->max_lba);		/* block within zone */
	/* Out-of-range zone numbers are reported as -1 */
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun /* ---------------------- low level IO ------------------------------------- */
216*4882a593Smuzhiyun
/*
 * Run software Hamming ECC over both 256-byte halves of a sector and
 * repair correctable bit errors in place.  Returns 0 on success, or
 * -EIO if either half is uncorrectable.
 */
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	uint8_t calc_ecc[3];
	uint8_t *stored_ecc[2] = { oob->ecc1, oob->ecc2 };
	int half;

	for (half = 0; half < 2; half++) {
		__nand_calculate_ecc(buffer, SM_SMALL_PAGE, calc_ecc,
			IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
		if (__nand_correct_data(buffer, calc_ecc, stored_ecc[half],
			SM_SMALL_PAGE,
			IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
			return -EIO;
		buffer += SM_SMALL_PAGE;
	}

	return 0;
}
236*4882a593Smuzhiyun
/*
 * Reads a sector + oob.
 *
 * Handles unmapped (-1) blocks by synthesizing 0xFF data.  On any
 * suspicious result (I/O error, garbage oob, bad-sector mark, ECC
 * failure) the read is retried up to two more times via the "again"
 * label, after probing media stability with sm_recheck_media() --
 * except for reads of the CIS itself, which would recurse.
 */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	/* Small-page NAND is read raw: ECC is done in software here */
	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		   won't help anyway */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	   despite card removal..... */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC*/
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
319*4882a593Smuzhiyun
320*4882a593Smuzhiyun /* Writes a sector to media */
sm_write_sector(struct sm_ftl * ftl,int zone,int block,int boffset,uint8_t * buffer,struct sm_oob * oob)321*4882a593Smuzhiyun static int sm_write_sector(struct sm_ftl *ftl,
322*4882a593Smuzhiyun int zone, int block, int boffset,
323*4882a593Smuzhiyun uint8_t *buffer, struct sm_oob *oob)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun struct mtd_oob_ops ops;
326*4882a593Smuzhiyun struct mtd_info *mtd = ftl->trans->mtd;
327*4882a593Smuzhiyun int ret;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun BUG_ON(ftl->readonly);
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun if (zone == 0 && (block == ftl->cis_block || block == 0)) {
332*4882a593Smuzhiyun dbg("attempted to write the CIS!");
333*4882a593Smuzhiyun return -EIO;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun if (ftl->unstable)
337*4882a593Smuzhiyun return -EIO;
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
340*4882a593Smuzhiyun ops.len = SM_SECTOR_SIZE;
341*4882a593Smuzhiyun ops.datbuf = buffer;
342*4882a593Smuzhiyun ops.ooboffs = 0;
343*4882a593Smuzhiyun ops.ooblen = SM_OOB_SIZE;
344*4882a593Smuzhiyun ops.oobbuf = (void *)oob;
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun /* Now we assume that hardware will catch write bitflip errors */
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun if (ret) {
351*4882a593Smuzhiyun dbg("write to block %d at zone %d, failed with error %d",
352*4882a593Smuzhiyun block, zone, ret);
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun sm_recheck_media(ftl);
355*4882a593Smuzhiyun return ret;
356*4882a593Smuzhiyun }
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun /* This should never happen, unless there is a bug in the driver */
359*4882a593Smuzhiyun WARN_ON(ops.oobretlen != SM_OOB_SIZE);
360*4882a593Smuzhiyun WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun return 0;
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun /* ------------------------ block IO ------------------------------------- */
366*4882a593Smuzhiyun
/*
 * Write a block using data and lba, and invalid sector bitmap.
 *
 * Sectors whose bit is set in @invalid_bitmap are written with
 * data_status = 0 (marked invalid).  On a failed sector write the whole
 * block is erased once and rewritten from the start; a second failure
 * marks the block bad and returns -EIO.
 */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		/* Small-page NAND: compute software ECC for both halves */
		if (ftl->smallpagenand) {
			__nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE,
					oob.ecc1,
					IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));

			__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
					SM_SMALL_PAGE, oob.ecc2,
					IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
							buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails. try to erase the block */
			/* This is safe, because we never write in blocks
			   that contain valuable data.
			   This is intended to repair block that are marked
			   as erased, but that isn't fully erased */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}
430*4882a593Smuzhiyun
431*4882a593Smuzhiyun
/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	/* A non-0xFF block_status in the oob flags the block as bad */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	   any bad blocks till fail completely */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
455*4882a593Smuzhiyun
/*
 * Erase a block within a zone
 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	/* Never erase the CIS block or block 0 of zone 0 */
	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd_erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			block, zone_num);
		goto error;
	}

	/* Return the freshly erased block to the zone's free-block fifo */
	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}
495*4882a593Smuzhiyun
/*
 * Thoroughly test that block is valid.
 * Returns 0 if the block holds a single consistent LBA, 1 if it was
 * "sliced" (two different LBAs, i.e. partially erased) and got erased
 * here, -EIO if three distinct LBAs were seen, -2 on read failure.
 */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* lbas[0] = -3 is a sentinel no decoded LBA can ever equal */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	   accepted */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each new distinct LBA value seen in the block */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun /* ----------------- media scanning --------------------------------- */
/* CHS geometry per media size: { size (MiB), cylinders, heads, sectors } */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },
};
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun
/* Leading bytes of a valid CIS tuple chain, used to recognize the CIS */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough.
 *
 * Fills in zone geometry, block size, small-page flag and CHS geometry
 * for @ftl.  Returns 0 on success, -ENODEV when the mtd device's
 * write/erase/oob sizes are incompatible with the deduced layout.
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	   sizes. SmartMedia cards exist up to 128 MiB and have same layout*/
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write,erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	/* Unknown size: fall back to the largest known geometry */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
651*4882a593Smuzhiyun
652*4882a593Smuzhiyun /* Validate the CIS */
sm_read_cis(struct sm_ftl * ftl)653*4882a593Smuzhiyun static int sm_read_cis(struct sm_ftl *ftl)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun struct sm_oob oob;
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun if (sm_read_sector(ftl,
658*4882a593Smuzhiyun 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
659*4882a593Smuzhiyun return -EIO;
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
662*4882a593Smuzhiyun return -EIO;
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
665*4882a593Smuzhiyun cis_signature, sizeof(cis_signature))) {
666*4882a593Smuzhiyun return 0;
667*4882a593Smuzhiyun }
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun return -EIO;
670*4882a593Smuzhiyun }
671*4882a593Smuzhiyun
/*
 * Scan the media for the CIS.
 *
 * Finds the first valid block within the zone-0 reserved area, then the
 * first valid sector in it, and probes for the CIS signature at page
 * offsets 0 and SM_SMALL_PAGE.  On success the location is cached in
 * ftl->cis_{block,boffset,page_offset} for later rechecks.
 */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	/* Retry at the second small-page offset within the sector */
	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun /* Basic test to determine if underlying mtd device if functional */
sm_recheck_media(struct sm_ftl * ftl)731*4882a593Smuzhiyun static int sm_recheck_media(struct sm_ftl *ftl)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun if (sm_read_cis(ftl)) {
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun if (!ftl->unstable) {
736*4882a593Smuzhiyun sm_printk("media unstable, not allowing writes");
737*4882a593Smuzhiyun ftl->unstable = 1;
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun return -EIO;
740*4882a593Smuzhiyun }
741*4882a593Smuzhiyun return 0;
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun
/* Initialize a FTL zone: build the LBA -> physical block table and the
 * FIFO of free (erased) blocks by scanning every block's first OOB area.
 * Returns 0 on success, -ENOMEM or -EIO on failure (all allocations are
 * released on the error paths).
 */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table (one 2-byte entry per LBA) */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	/* 0xFFFF (-1 as int16) marks an unmapped LBA */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO (2 bytes per block number) */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
			first sector, because erase happens in one shot */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
			very low probability of failure here */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
			lets leave that to recovery application */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
			just put the sector in the FTL table */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* Two physical blocks claim the same LBA */
		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
			zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
			they hold different versions of same data. It not
			known which is more recent, thus just erase one of them
		*/
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
		work, but it can still can be (partially) read */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to (primitive wear leveling) */
	/* NOTE(review): only 2 bytes of 'i' are randomized; on a big-endian
	   machine those are the high-order bytes, which could leave 'i'
	   negative and make the rotation loop below misbehave — confirm, or
	   use a full-width random value modulo the range instead. */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	/* Rotate the free-block FIFO by a random number of entries */
	while (i--) {
		len = kfifo_out(&zone->free_sectors,
			(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
866*4882a593Smuzhiyun
867*4882a593Smuzhiyun /* Get and automatically initialize an FTL mapping for one zone */
sm_get_zone(struct sm_ftl * ftl,int zone_num)868*4882a593Smuzhiyun static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
869*4882a593Smuzhiyun {
870*4882a593Smuzhiyun struct ftl_zone *zone;
871*4882a593Smuzhiyun int error;
872*4882a593Smuzhiyun
873*4882a593Smuzhiyun BUG_ON(zone_num >= ftl->zone_count);
874*4882a593Smuzhiyun zone = &ftl->zones[zone_num];
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun if (!zone->initialized) {
877*4882a593Smuzhiyun error = sm_init_zone(ftl, zone_num);
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun if (error)
880*4882a593Smuzhiyun return ERR_PTR(error);
881*4882a593Smuzhiyun }
882*4882a593Smuzhiyun return zone;
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun
886*4882a593Smuzhiyun /* ----------------- cache handling ------------------------------------------*/
887*4882a593Smuzhiyun
888*4882a593Smuzhiyun /* Initialize the one block cache */
sm_cache_init(struct sm_ftl * ftl)889*4882a593Smuzhiyun static void sm_cache_init(struct sm_ftl *ftl)
890*4882a593Smuzhiyun {
891*4882a593Smuzhiyun ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
892*4882a593Smuzhiyun ftl->cache_clean = 1;
893*4882a593Smuzhiyun ftl->cache_zone = -1;
894*4882a593Smuzhiyun ftl->cache_block = -1;
895*4882a593Smuzhiyun /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun /* Put sector in one block cache */
sm_cache_put(struct sm_ftl * ftl,char * buffer,int boffset)899*4882a593Smuzhiyun static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
900*4882a593Smuzhiyun {
901*4882a593Smuzhiyun memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
902*4882a593Smuzhiyun clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
903*4882a593Smuzhiyun ftl->cache_clean = 0;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun /* Read a sector from the cache */
sm_cache_get(struct sm_ftl * ftl,char * buffer,int boffset)907*4882a593Smuzhiyun static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
908*4882a593Smuzhiyun {
909*4882a593Smuzhiyun if (test_bit(boffset / SM_SECTOR_SIZE,
910*4882a593Smuzhiyun &ftl->cache_data_invalid_bitmap))
911*4882a593Smuzhiyun return -1;
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
914*4882a593Smuzhiyun return 0;
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun
/* Write the cache to hardware.
 *
 * Sectors never written by the upper layer are first filled in from the
 * old physical block, then the complete block is written to a fresh
 * physical block pulled from the free-sectors FIFO, the FTL table is
 * updated, and the old block is erased and (implicitly) recycled.
 * Caller must hold ftl->mutex. Returns 0 on success, -EIO when the media
 * is unstable or no free blocks remain.
 */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	/* Nothing to do for a clean cache */
	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	/* cache_clean == 0 implies a block is cached, so cache_zone >= 0 */
	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
		but for such worn out media it doesn't worth the trouble,
			and the dangers */
	if (kfifo_out(&zone->free_sectors,
				(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* On write failure, retry with the next free block; the loop is
	   bounded because it bails out above once the media turns unstable
	   or the free-sectors FIFO runs dry */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun
/* flush timer, runs a second after last write; defers the actual flush
 * to process context via the workqueue, since flushing sleeps
 * (mutex + MTD I/O) and timers run in atomic context */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = from_timer(ftl, t, timer);
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}
986*4882a593Smuzhiyun
987*4882a593Smuzhiyun /* cache flush work, kicked by timer */
sm_cache_flush_work(struct work_struct * work)988*4882a593Smuzhiyun static void sm_cache_flush_work(struct work_struct *work)
989*4882a593Smuzhiyun {
990*4882a593Smuzhiyun struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
991*4882a593Smuzhiyun mutex_lock(&ftl->mutex);
992*4882a593Smuzhiyun sm_cache_flush(ftl);
993*4882a593Smuzhiyun mutex_unlock(&ftl->mutex);
994*4882a593Smuzhiyun return;
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun
997*4882a593Smuzhiyun /* ---------------- outside interface -------------------------------------- */
998*4882a593Smuzhiyun
/* outside interface: read a sector.
 * Looks in the write cache first, then translates the LBA through the
 * zone table and reads from the media. Unmapped blocks read back as
 * erased flash (all 0xFF). Returns 0 on success or a negative errno.
 */
static int sm_read(struct mtd_blktrans_dev *dev,
		unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	/* Split the 512-byte sector number into zone / block / offset */
	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		/* Unmapped logical block: report erased data */
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	/* Populate the cache with the sector we just read from the media */
	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}
1044*4882a593Smuzhiyun
/* outside interface: write a sector.
 * Writes go through the one-block cache; a write to a different block
 * first flushes the currently cached block. The flush timer is re-armed
 * on every write so the cache is written out after cache_timeout ms of
 * write inactivity. Returns 0 on success or a negative errno.
 */
static int sm_write(struct mtd_blktrans_dev *dev,
				unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	/* The blktrans layer must never call writesect on a RO device */
	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	/* Re-arm the delayed flush even on error paths */
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun /* outside interface: flush everything */
sm_flush(struct mtd_blktrans_dev * dev)1085*4882a593Smuzhiyun static int sm_flush(struct mtd_blktrans_dev *dev)
1086*4882a593Smuzhiyun {
1087*4882a593Smuzhiyun struct sm_ftl *ftl = dev->priv;
1088*4882a593Smuzhiyun int retval;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun mutex_lock(&ftl->mutex);
1091*4882a593Smuzhiyun retval = sm_cache_flush(ftl);
1092*4882a593Smuzhiyun mutex_unlock(&ftl->mutex);
1093*4882a593Smuzhiyun return retval;
1094*4882a593Smuzhiyun }
1095*4882a593Smuzhiyun
/* outside interface: device is released.
 * Stops the delayed-flush machinery (timer first, then any in-flight
 * work item) before performing one final synchronous cache flush.
 */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	del_timer_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun /* outside interface: get geometry */
sm_getgeo(struct mtd_blktrans_dev * dev,struct hd_geometry * geo)1109*4882a593Smuzhiyun static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun struct sm_ftl *ftl = dev->priv;
1112*4882a593Smuzhiyun geo->heads = ftl->heads;
1113*4882a593Smuzhiyun geo->sectors = ftl->sectors;
1114*4882a593Smuzhiyun geo->cylinders = ftl->cylinders;
1115*4882a593Smuzhiyun return 0;
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun
/* external interface: main initialization function.
 * Called by the blktrans layer for every registered mtd device; probes
 * the media, allocates the FTL state and registers a block device.
 * Unsupported media is silently skipped. Errors unwind through the
 * numbered goto labels in reverse order of allocation.
 */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *trans;
	struct sm_ftl *ftl;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
	if (!ftl)
		goto error1;


	mutex_init(&ftl->mutex);
	timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");
		goto error2;
	}


	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)
		goto error2;

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
								GFP_KERNEL);
	if (!ftl->zones)
		goto error3;

	/* Allocate the cache*/
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);

	if (!ftl->cache_data)
		goto error4;

	sm_cache_init(ftl);


	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!trans)
		goto error5;

	ftl->trans = trans;
	trans->priv = ftl;

	trans->tr = tr;
	trans->mtd = mtd;
	trans->devnum = -1;
	/* Total usable size in 512-byte sectors (hence the >> 9) */
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");
		goto error6;
	}

	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	if (!ftl->disk_attributes)
		goto error6;
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		(int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("FTL layout:");
	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
		ftl->zone_count, ftl->max_lba,
		ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",
		ftl->block_size);


	/* Register device*/
	/* NOTE(review): if add_mtd_blktrans_dev() fails here, the sysfs
	   attributes created above are never torn down — looks like a
	   small leak on this rarely-taken path; confirm against
	   sm_delete_sysfs_attributes() */
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");
		goto error6;
	}
	return;
error6:
	kfree(trans);
error5:
	kfree(ftl->cache_data);
error4:
	kfree(ftl->zones);
error3:
	kfree(ftl->cis_buffer);
error2:
	kfree(ftl);
error1:
	return;
}
1215*4882a593Smuzhiyun
/* main interface: device {surprise,} removal.
 * Unregisters the block device first (which quiesces I/O and runs the
 * release path), then frees per-zone tables/FIFOs and the remaining
 * FTL state.
 */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);
	ftl->trans = NULL;

	/* Only zones that were lazily initialized own any memory */
	for (i = 0 ; i < ftl->zone_count; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}
1240*4882a593Smuzhiyun
/* blktrans operations: hooks this FTL into the mtd block translation
 * layer, which creates the "smblk" block devices on top of it */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name = "smblk",
	.major = 0, /* dynamic major number */
	.part_bits = SM_FTL_PARTN_BITS,
	.blksize = SM_SECTOR_SIZE,
	.getgeo = sm_getgeo,

	.add_mtd = sm_add_mtd,
	.remove_dev = sm_remove_dev,

	.readsect = sm_read,
	.writesect = sm_write,

	.flush = sm_flush,
	.release = sm_release,

	.owner = THIS_MODULE,
};
1259*4882a593Smuzhiyun
sm_module_init(void)1260*4882a593Smuzhiyun static __init int sm_module_init(void)
1261*4882a593Smuzhiyun {
1262*4882a593Smuzhiyun int error = 0;
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun cache_flush_workqueue = create_freezable_workqueue("smflush");
1265*4882a593Smuzhiyun if (!cache_flush_workqueue)
1266*4882a593Smuzhiyun return -ENOMEM;
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun error = register_mtd_blktrans(&sm_ftl_ops);
1269*4882a593Smuzhiyun if (error)
1270*4882a593Smuzhiyun destroy_workqueue(cache_flush_workqueue);
1271*4882a593Smuzhiyun return error;
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun
sm_module_exit(void)1275*4882a593Smuzhiyun static void __exit sm_module_exit(void)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun destroy_workqueue(cache_flush_workqueue);
1278*4882a593Smuzhiyun deregister_mtd_blktrans(&sm_ftl_ops);
1279*4882a593Smuzhiyun }
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun module_init(sm_module_init);
1282*4882a593Smuzhiyun module_exit(sm_module_exit);
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1285*4882a593Smuzhiyun MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1286*4882a593Smuzhiyun MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
1287