/*
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * When the first attempt at device initialization fails, we may need to
 * wait a little bit and retry. This timeout, by default 3 seconds, gives
 * the device time to start up. Required on BCM2708 and a few other chipsets.
 */
#define MTD_DEFAULT_TIMEOUT	3

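/*
 * Note (derived from add_device() below, not part of the original comment):
 * when the driver is built in, the device open is retried roughly once per
 * second, for up to MTD_DEFAULT_TIMEOUT additional attempts, to give late
 * probing block devices a chance to appear.
 */
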
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/major.h>

/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd;
	struct mutex write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);


static struct page *page_read(struct address_space *mapping, pgoff_t index)
{
	return read_mapping_page(mapping, index, NULL);
}

/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	struct page *page;
	pgoff_t index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		max = page_address(page) + PAGE_SIZE;
		for (p = page_address(page); p < max; p++)
			if (*p != -1UL) {
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				balance_dirty_pages_ratelimited(mapping);
				break;
			}

		put_page(page);
		pages--;
		index++;
	}
	return 0;
}
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	mutex_unlock(&dev->write_mutex);
	if (err)
		pr_err("erase failed err = %d\n", err);

	return err;
}


static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	pgoff_t index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE - 1);
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		put_page(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	pgoff_t index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page) + offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
			balance_dirty_pages_ratelimited(mapping);
		}
		put_page(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	mutex_unlock(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}


/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;
	sync_blockdev(dev->blkdev);
	return;
}


static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->blkdev) {
		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
					0, -1);
		blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(dev);
}


static struct block2mtd_dev *add_device(char *devname, int erase_size,
		int timeout)
{
#ifndef MODULE
	int i;
#endif
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;
	struct block2mtd_dev *dev;
	char *name;

	if (!devname)
		return NULL;

	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Get a handle on the device */
	bdev = blkdev_get_by_path(devname, mode, dev);

#ifndef MODULE
	/*
	 * We might not have the root device mounted at this point.
	 * Try to resolve the device name by other means.
	 */
	for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
		dev_t devt;

		if (i)
			/*
			 * Calling wait_for_device_probe in the first loop
			 * was not enough, sleep for a bit in subsequent
			 * go-arounds.
			 */
			msleep(1000);
		wait_for_device_probe();

		devt = name_to_dev_t(devname);
		if (!devt)
			continue;
		bdev = blkdev_get_by_dev(devt, mode, dev);
	}
#endif

	if (IS_ERR(bdev)) {
		pr_err("error: cannot open device %s\n", devname);
		goto err_free_block2mtd;
	}
	dev->blkdev = bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		pr_err("attempting to use an MTD device as a block device\n");
		goto err_free_block2mtd;
	}

	if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
		pr_err("erasesize must be a divisor of device size\n");
		goto err_free_block2mtd;
	}

	mutex_init(&dev->write_mutex);

	/* Setup the MTD structure */
	/* make the name contain the block device name */
	name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
	if (!name)
		goto err_destroy_mutex;

	dev->mtd.name = name;

	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.writesize = 1;
	dev->mtd.writebufsize = PAGE_SIZE;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd._erase = block2mtd_erase;
	dev->mtd._write = block2mtd_write;
	dev->mtd._sync = block2mtd_sync;
	dev->mtd._read = block2mtd_read;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (mtd_device_register(&dev->mtd, NULL, 0)) {
		/* Device didn't get added, so free the entry */
		goto err_destroy_mutex;
	}

	list_add(&dev->list, &blkmtd_device_list);
	pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
		dev->mtd.index,
		dev->mtd.name + strlen("block2mtd: "),
		dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

err_destroy_mutex:
	mutex_destroy(&dev->write_mutex);
err_free_block2mtd:
	block2mtd_free_device(dev);
	return NULL;
}


/* This function works like the regular strtoul. In addition, it
 * accepts some suffixes for a more human-readable number format:
 * ki, Ki, kiB, KiB	- multiply result by 1024
 * Mi, MiB		- multiply result by 1024^2
 * Gi, GiB		- multiply result by 1024^3
 */
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G':
		result *= 1024;
		fallthrough;
	case 'M':
		result *= 1024;
		fallthrough;
	case 'K':
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i') {
			if ((*endp)[2] == 'B')
				(*endp) += 3;
			else
				(*endp) += 2;
		}
	}
	return result;
}
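
/*
 * Example (based on the parsing above, not part of the original source):
 * "64Ki" and "64KiB" both yield 65536, "1Mi" yields 1048576. A bare "64K"
 * leaves *endp pointing at 'K', so parse_num() below rejects it with
 * -EINVAL; only the 'i'/'iB' suffix forms are accepted for the parameter.
 */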


static int parse_num(size_t *num, const char *token)
{
	char *endp;
	size_t n;

	n = (size_t) ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num = n;
	return 0;
}


static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');
	if (newline && !newline[1])
		*newline = 0;
}


#ifndef MODULE
static int block2mtd_init_called = 0;
/* 80 for device, 12 for erase size */
static char block2mtd_paramline[80 + 12];
#endif

static int block2mtd_setup2(const char *val)
{
	/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
	char buf[80 + 12 + 80 + 8];
	char *str = buf;
	char *token[2];
	char *name;
	size_t erase_size = PAGE_SIZE;
	unsigned long timeout = MTD_DEFAULT_TIMEOUT;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
		pr_err("parameter too long\n");
		return 0;
	}

	strcpy(str, val);
	kill_final_newline(str);

	for (i = 0; i < 2; i++)
		token[i] = strsep(&str, ",");

	if (str) {
		pr_err("too many arguments\n");
		return 0;
	}

	if (!token[0]) {
		pr_err("no argument\n");
		return 0;
	}

	name = token[0];
	if (strlen(name) + 1 > 80) {
		pr_err("device name too long\n");
		return 0;
	}

	if (token[1]) {
		ret = parse_num(&erase_size, token[1]);
		if (ret) {
			pr_err("illegal erase size\n");
			return 0;
		}
	}

	add_device(name, erase_size, timeout);

	return 0;
}


static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
	return block2mtd_setup2(val);
#else
	/* If more parameters are later passed in via
	   /sys/module/block2mtd/parameters/block2mtd
	   and block2mtd_init() has already been called,
	   we can parse the argument now. */

	if (block2mtd_init_called)
		return block2mtd_setup2(val);

	/* During early boot stage, we only save the parameters
	   here. We must parse them later: if the param is passed
	   from the kernel boot command line, block2mtd_setup() is
	   called so early that it is not possible to resolve
	   the device (even kmalloc() fails). Defer that work to
	   block2mtd_setup2(). */

	strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

	return 0;
#endif
}


module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
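
/*
 * Usage example (illustrative; the device paths are placeholders):
 *
 *   modprobe block2mtd block2mtd=/dev/sdb1,64Ki
 *
 * or, with the driver built in, on the kernel command line:
 *
 *   block2mtd.block2mtd=/dev/mmcblk0p2,64Ki
 *
 * If the erase size is omitted it defaults to PAGE_SIZE.
 */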

static int __init block2mtd_init(void)
{
	int ret = 0;

#ifndef MODULE
	if (strlen(block2mtd_paramline))
		ret = block2mtd_setup2(block2mtd_paramline);
	block2mtd_init_called = 1;
#endif

	return ret;
}


static void block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
		block2mtd_sync(&dev->mtd);
		mtd_device_unregister(&dev->mtd);
		mutex_destroy(&dev->write_mutex);
		pr_info("mtd%d: [%s] removed\n",
			dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}

late_initcall(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");