// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/fs.h>
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/sched.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun #include <linux/types.h>
16*4882a593Smuzhiyun #include <linux/vmalloc.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
19*4882a593Smuzhiyun #include <linux/mtd/blktrans.h>
20*4882a593Smuzhiyun #include <linux/mutex.h>
21*4882a593Smuzhiyun #include <linux/major.h>
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun struct mtdblk_dev {
25*4882a593Smuzhiyun struct mtd_blktrans_dev mbd;
26*4882a593Smuzhiyun int count;
27*4882a593Smuzhiyun struct mutex cache_mutex;
28*4882a593Smuzhiyun unsigned char *cache_data;
29*4882a593Smuzhiyun unsigned long cache_offset;
30*4882a593Smuzhiyun unsigned int cache_size;
31*4882a593Smuzhiyun enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
32*4882a593Smuzhiyun };
33*4882a593Smuzhiyun
/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */
43*4882a593Smuzhiyun
erase_write(struct mtd_info * mtd,unsigned long pos,unsigned int len,const char * buf)44*4882a593Smuzhiyun static int erase_write (struct mtd_info *mtd, unsigned long pos,
45*4882a593Smuzhiyun unsigned int len, const char *buf)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun struct erase_info erase;
48*4882a593Smuzhiyun size_t retlen;
49*4882a593Smuzhiyun int ret;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun /*
52*4882a593Smuzhiyun * First, let's erase the flash block.
53*4882a593Smuzhiyun */
54*4882a593Smuzhiyun erase.addr = pos;
55*4882a593Smuzhiyun erase.len = len;
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun ret = mtd_erase(mtd, &erase);
58*4882a593Smuzhiyun if (ret) {
59*4882a593Smuzhiyun printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
60*4882a593Smuzhiyun "on \"%s\" failed\n",
61*4882a593Smuzhiyun pos, len, mtd->name);
62*4882a593Smuzhiyun return ret;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun /*
66*4882a593Smuzhiyun * Next, write the data to flash.
67*4882a593Smuzhiyun */
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun ret = mtd_write(mtd, pos, len, &retlen, buf);
70*4882a593Smuzhiyun if (ret)
71*4882a593Smuzhiyun return ret;
72*4882a593Smuzhiyun if (retlen != len)
73*4882a593Smuzhiyun return -EIO;
74*4882a593Smuzhiyun return 0;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun
write_cached_data(struct mtdblk_dev * mtdblk)78*4882a593Smuzhiyun static int write_cached_data (struct mtdblk_dev *mtdblk)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun struct mtd_info *mtd = mtdblk->mbd.mtd;
81*4882a593Smuzhiyun int ret;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun if (mtdblk->cache_state != STATE_DIRTY)
84*4882a593Smuzhiyun return 0;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun pr_debug("mtdblock: writing cached data for \"%s\" "
87*4882a593Smuzhiyun "at 0x%lx, size 0x%x\n", mtd->name,
88*4882a593Smuzhiyun mtdblk->cache_offset, mtdblk->cache_size);
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun ret = erase_write (mtd, mtdblk->cache_offset,
91*4882a593Smuzhiyun mtdblk->cache_size, mtdblk->cache_data);
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun /*
94*4882a593Smuzhiyun * Here we could arguably set the cache state to STATE_CLEAN.
95*4882a593Smuzhiyun * However this could lead to inconsistency since we will not
96*4882a593Smuzhiyun * be notified if this content is altered on the flash by other
97*4882a593Smuzhiyun * means. Let's declare it empty and leave buffering tasks to
98*4882a593Smuzhiyun * the buffer cache instead.
99*4882a593Smuzhiyun *
100*4882a593Smuzhiyun * If this cache_offset points to a bad block, data cannot be
101*4882a593Smuzhiyun * written to the device. Clear cache_state to avoid writing to
102*4882a593Smuzhiyun * bad blocks repeatedly.
103*4882a593Smuzhiyun */
104*4882a593Smuzhiyun if (ret == 0 || ret == -EIO)
105*4882a593Smuzhiyun mtdblk->cache_state = STATE_EMPTY;
106*4882a593Smuzhiyun return ret;
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun
do_cached_write(struct mtdblk_dev * mtdblk,unsigned long pos,int len,const char * buf)110*4882a593Smuzhiyun static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
111*4882a593Smuzhiyun int len, const char *buf)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun struct mtd_info *mtd = mtdblk->mbd.mtd;
114*4882a593Smuzhiyun unsigned int sect_size = mtdblk->cache_size;
115*4882a593Smuzhiyun size_t retlen;
116*4882a593Smuzhiyun int ret;
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
119*4882a593Smuzhiyun mtd->name, pos, len);
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun if (!sect_size)
122*4882a593Smuzhiyun return mtd_write(mtd, pos, len, &retlen, buf);
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun while (len > 0) {
125*4882a593Smuzhiyun unsigned long sect_start = (pos/sect_size)*sect_size;
126*4882a593Smuzhiyun unsigned int offset = pos - sect_start;
127*4882a593Smuzhiyun unsigned int size = sect_size - offset;
128*4882a593Smuzhiyun if( size > len )
129*4882a593Smuzhiyun size = len;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun if (size == sect_size) {
132*4882a593Smuzhiyun /*
133*4882a593Smuzhiyun * We are covering a whole sector. Thus there is no
134*4882a593Smuzhiyun * need to bother with the cache while it may still be
135*4882a593Smuzhiyun * useful for other partial writes.
136*4882a593Smuzhiyun */
137*4882a593Smuzhiyun ret = erase_write (mtd, pos, size, buf);
138*4882a593Smuzhiyun if (ret)
139*4882a593Smuzhiyun return ret;
140*4882a593Smuzhiyun } else {
141*4882a593Smuzhiyun /* Partial sector: need to use the cache */
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun if (mtdblk->cache_state == STATE_DIRTY &&
144*4882a593Smuzhiyun mtdblk->cache_offset != sect_start) {
145*4882a593Smuzhiyun ret = write_cached_data(mtdblk);
146*4882a593Smuzhiyun if (ret)
147*4882a593Smuzhiyun return ret;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun if (mtdblk->cache_state == STATE_EMPTY ||
151*4882a593Smuzhiyun mtdblk->cache_offset != sect_start) {
152*4882a593Smuzhiyun /* fill the cache with the current sector */
153*4882a593Smuzhiyun mtdblk->cache_state = STATE_EMPTY;
154*4882a593Smuzhiyun ret = mtd_read(mtd, sect_start, sect_size,
155*4882a593Smuzhiyun &retlen, mtdblk->cache_data);
156*4882a593Smuzhiyun if (ret)
157*4882a593Smuzhiyun return ret;
158*4882a593Smuzhiyun if (retlen != sect_size)
159*4882a593Smuzhiyun return -EIO;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun mtdblk->cache_offset = sect_start;
162*4882a593Smuzhiyun mtdblk->cache_size = sect_size;
163*4882a593Smuzhiyun mtdblk->cache_state = STATE_CLEAN;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /* write data to our local cache */
167*4882a593Smuzhiyun memcpy (mtdblk->cache_data + offset, buf, size);
168*4882a593Smuzhiyun mtdblk->cache_state = STATE_DIRTY;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun buf += size;
172*4882a593Smuzhiyun pos += size;
173*4882a593Smuzhiyun len -= size;
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun return 0;
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun
do_cached_read(struct mtdblk_dev * mtdblk,unsigned long pos,int len,char * buf)180*4882a593Smuzhiyun static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
181*4882a593Smuzhiyun int len, char *buf)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun struct mtd_info *mtd = mtdblk->mbd.mtd;
184*4882a593Smuzhiyun unsigned int sect_size = mtdblk->cache_size;
185*4882a593Smuzhiyun size_t retlen;
186*4882a593Smuzhiyun int ret;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
189*4882a593Smuzhiyun mtd->name, pos, len);
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun if (!sect_size)
192*4882a593Smuzhiyun return mtd_read(mtd, pos, len, &retlen, buf);
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun while (len > 0) {
195*4882a593Smuzhiyun unsigned long sect_start = (pos/sect_size)*sect_size;
196*4882a593Smuzhiyun unsigned int offset = pos - sect_start;
197*4882a593Smuzhiyun unsigned int size = sect_size - offset;
198*4882a593Smuzhiyun if (size > len)
199*4882a593Smuzhiyun size = len;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun /*
202*4882a593Smuzhiyun * Check if the requested data is already cached
203*4882a593Smuzhiyun * Read the requested amount of data from our internal cache if it
204*4882a593Smuzhiyun * contains what we want, otherwise we read the data directly
205*4882a593Smuzhiyun * from flash.
206*4882a593Smuzhiyun */
207*4882a593Smuzhiyun if (mtdblk->cache_state != STATE_EMPTY &&
208*4882a593Smuzhiyun mtdblk->cache_offset == sect_start) {
209*4882a593Smuzhiyun memcpy (buf, mtdblk->cache_data + offset, size);
210*4882a593Smuzhiyun } else {
211*4882a593Smuzhiyun ret = mtd_read(mtd, pos, size, &retlen, buf);
212*4882a593Smuzhiyun if (ret)
213*4882a593Smuzhiyun return ret;
214*4882a593Smuzhiyun if (retlen != size)
215*4882a593Smuzhiyun return -EIO;
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun buf += size;
219*4882a593Smuzhiyun pos += size;
220*4882a593Smuzhiyun len -= size;
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
mtdblock_readsect(struct mtd_blktrans_dev * dev,unsigned long block,char * buf)226*4882a593Smuzhiyun static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
227*4882a593Smuzhiyun unsigned long block, char *buf)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
230*4882a593Smuzhiyun return do_cached_read(mtdblk, block<<9, 512, buf);
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun
mtdblock_writesect(struct mtd_blktrans_dev * dev,unsigned long block,char * buf)233*4882a593Smuzhiyun static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
234*4882a593Smuzhiyun unsigned long block, char *buf)
235*4882a593Smuzhiyun {
236*4882a593Smuzhiyun struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
237*4882a593Smuzhiyun if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
238*4882a593Smuzhiyun mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
239*4882a593Smuzhiyun if (!mtdblk->cache_data)
240*4882a593Smuzhiyun return -EINTR;
241*4882a593Smuzhiyun /* -EINTR is not really correct, but it is the best match
242*4882a593Smuzhiyun * documented in man 2 write for all cases. We could also
243*4882a593Smuzhiyun * return -EAGAIN sometimes, but why bother?
244*4882a593Smuzhiyun */
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun return do_cached_write(mtdblk, block<<9, 512, buf);
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun
mtdblock_open(struct mtd_blktrans_dev * mbd)249*4882a593Smuzhiyun static int mtdblock_open(struct mtd_blktrans_dev *mbd)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun pr_debug("mtdblock_open\n");
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun if (mtdblk->count) {
256*4882a593Smuzhiyun mtdblk->count++;
257*4882a593Smuzhiyun return 0;
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun /* OK, it's not open. Create cache info for it */
261*4882a593Smuzhiyun mtdblk->count = 1;
262*4882a593Smuzhiyun mutex_init(&mtdblk->cache_mutex);
263*4882a593Smuzhiyun mtdblk->cache_state = STATE_EMPTY;
264*4882a593Smuzhiyun if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
265*4882a593Smuzhiyun mtdblk->cache_size = mbd->mtd->erasesize;
266*4882a593Smuzhiyun mtdblk->cache_data = NULL;
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun pr_debug("ok\n");
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun return 0;
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun
mtdblock_release(struct mtd_blktrans_dev * mbd)274*4882a593Smuzhiyun static void mtdblock_release(struct mtd_blktrans_dev *mbd)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun pr_debug("mtdblock_release\n");
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun mutex_lock(&mtdblk->cache_mutex);
281*4882a593Smuzhiyun write_cached_data(mtdblk);
282*4882a593Smuzhiyun mutex_unlock(&mtdblk->cache_mutex);
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun if (!--mtdblk->count) {
285*4882a593Smuzhiyun /*
286*4882a593Smuzhiyun * It was the last usage. Free the cache, but only sync if
287*4882a593Smuzhiyun * opened for writing.
288*4882a593Smuzhiyun */
289*4882a593Smuzhiyun if (mbd->file_mode & FMODE_WRITE)
290*4882a593Smuzhiyun mtd_sync(mbd->mtd);
291*4882a593Smuzhiyun vfree(mtdblk->cache_data);
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun pr_debug("ok\n");
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
mtdblock_flush(struct mtd_blktrans_dev * dev)297*4882a593Smuzhiyun static int mtdblock_flush(struct mtd_blktrans_dev *dev)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
300*4882a593Smuzhiyun int ret;
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun mutex_lock(&mtdblk->cache_mutex);
303*4882a593Smuzhiyun ret = write_cached_data(mtdblk);
304*4882a593Smuzhiyun mutex_unlock(&mtdblk->cache_mutex);
305*4882a593Smuzhiyun mtd_sync(dev->mtd);
306*4882a593Smuzhiyun return ret;
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun
mtdblock_add_mtd(struct mtd_blktrans_ops * tr,struct mtd_info * mtd)309*4882a593Smuzhiyun static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun if (!dev)
314*4882a593Smuzhiyun return;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun dev->mbd.mtd = mtd;
317*4882a593Smuzhiyun dev->mbd.devnum = mtd->index;
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun dev->mbd.size = mtd->size >> 9;
320*4882a593Smuzhiyun dev->mbd.tr = tr;
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun if (!(mtd->flags & MTD_WRITEABLE))
323*4882a593Smuzhiyun dev->mbd.readonly = 1;
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun if (add_mtd_blktrans_dev(&dev->mbd))
326*4882a593Smuzhiyun kfree(dev);
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun
/* blktrans remove hook: unregister the translated block device. */
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun static struct mtd_blktrans_ops mtdblock_tr = {
335*4882a593Smuzhiyun .name = "mtdblock",
336*4882a593Smuzhiyun .major = MTD_BLOCK_MAJOR,
337*4882a593Smuzhiyun .part_bits = 0,
338*4882a593Smuzhiyun .blksize = 512,
339*4882a593Smuzhiyun .open = mtdblock_open,
340*4882a593Smuzhiyun .flush = mtdblock_flush,
341*4882a593Smuzhiyun .release = mtdblock_release,
342*4882a593Smuzhiyun .readsect = mtdblock_readsect,
343*4882a593Smuzhiyun .writesect = mtdblock_writesect,
344*4882a593Smuzhiyun .add_mtd = mtdblock_add_mtd,
345*4882a593Smuzhiyun .remove_dev = mtdblock_remove_dev,
346*4882a593Smuzhiyun .owner = THIS_MODULE,
347*4882a593Smuzhiyun };
348*4882a593Smuzhiyun
init_mtdblock(void)349*4882a593Smuzhiyun static int __init init_mtdblock(void)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun return register_mtd_blktrans(&mtdblock_tr);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun
cleanup_mtdblock(void)354*4882a593Smuzhiyun static void __exit cleanup_mtdblock(void)
355*4882a593Smuzhiyun {
356*4882a593Smuzhiyun deregister_mtd_blktrans(&mtdblock_tr);
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun module_init(init_mtdblock);
360*4882a593Smuzhiyun module_exit(cleanup_mtdblock);
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun MODULE_LICENSE("GPL");
364*4882a593Smuzhiyun MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
365*4882a593Smuzhiyun MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
366