// SPDX-License-Identifier: GPL-2.0

#define dev_fmt(fmt) "mtdoops-pstore: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pstore_blk.h>
#include <linux/mtd/mtd.h>
#include <linux/bitops.h>
#include <linux/slab.h>
11*4882a593Smuzhiyun
/*
 * Driver-wide state for the mtdpstore backend. A single instance exists
 * (oops_cxt): the driver attaches to exactly one MTD partition.
 */
static struct mtdpstore_context {
	int index;			/* MTD device number to attach to; -1 while detached */
	struct pstore_blk_config info;	/* configuration obtained from pstore/blk */
	struct pstore_device_info dev;	/* device descriptor registered with pstore/blk */
	struct mtd_info *mtd;		/* attached MTD device, NULL when not attached */
	unsigned long *rmmap; /* removed bit map */
	unsigned long *usedmap; /* used bit map */
	/*
	 * used for panic write
	 * As there are no block_isbad for panic case, we should keep this
	 * status before panic to ensure panic_write not failed.
	 */
	unsigned long *badmap; /* bad block bit map */
} oops_cxt;
26*4882a593Smuzhiyun
/*
 * Test whether the erase block containing @off is bad.
 *
 * The cached badmap is consulted first; otherwise the MTD driver is asked
 * and a positive answer is cached so panic-time code can reuse it.
 * Returns true/false, or a negative errno if the driver query itself fails.
 */
static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u64 blknum;
	int ret;

	off = ALIGN_DOWN(off, mtd->erasesize);
	blknum = div_u64(off, mtd->erasesize);

	/* Already known to be bad? */
	if (test_bit(blknum, cxt->badmap))
		return true;

	ret = mtd_block_isbad(mtd, off);
	if (ret < 0) {
		dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
		return ret;
	}
	if (ret > 0) {
		/* Cache the result for later (panic-safe) lookups. */
		set_bit(blknum, cxt->badmap);
		return true;
	}
	return false;
}
48*4882a593Smuzhiyun
/*
 * Panic-safe bad-block test: only the bitmap cached before the panic is
 * consulted, because the MTD driver must not be called in panic context.
 */
static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u64 blknum = div_u64(ALIGN_DOWN(off, mtd->erasesize), mtd->erasesize);

	return test_bit(blknum, cxt->badmap);
}
59*4882a593Smuzhiyun
/* Record that the zone holding @off now contains a valid record. */
static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
		loff_t off)
{
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&cxt->mtd->dev, "mark zone %llu used\n", zonenum);
	set_bit(zonenum, cxt->usedmap);
}
69*4882a593Smuzhiyun
/* Record that the zone holding @off no longer contains a valid record. */
static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
		loff_t off)
{
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&cxt->mtd->dev, "mark zone %llu unused\n", zonenum);
	clear_bit(zonenum, cxt->usedmap);
}
79*4882a593Smuzhiyun
/* Clear the "used" bit of every zone in the erase block containing @off. */
static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;
	u32 i;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	for (i = 0; i < zonecnt; i++) {
		dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
		clear_bit(zonenum, cxt->usedmap);
		zonenum++;
	}
}
96*4882a593Smuzhiyun
/*
 * A zone counts as used when its bit is set or when it lives in a bad
 * block (bad blocks must never be handed out as writable space).
 */
static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
{
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
	u64 blknum = div_u64(off, cxt->mtd->erasesize);

	return test_bit(blknum, cxt->badmap) || test_bit(zonenum, cxt->usedmap);
}
106*4882a593Smuzhiyun
/* Does any zone inside the erase block containing @off still hold data? */
static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;
	u32 i;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	for (i = 0; i < zonecnt; i++) {
		if (test_bit(zonenum + i, cxt->usedmap))
			return true;
	}
	return false;
}
124*4882a593Smuzhiyun
/*
 * Report whether @buf looks like erased flash (all 0xFF).
 *
 * Only the first min(size, writesize / 4) bytes are sampled, which is
 * enough to tell an erased zone from one holding a record.
 *
 * Fix: the loop index was a signed int compared against a size_t bound
 * (signed/unsigned mismatch); use size_t for the index as well.
 */
static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
		size_t size)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t sz, i;

	sz = min_t(uint32_t, size, mtd->writesize / 4);
	for (i = 0; i < sz; i++) {
		if (buf[i] != (char)0xFF)
			return false;
	}
	return true;
}
139*4882a593Smuzhiyun
/* Flag the zone holding @off for lazy erase on flush_removed(). */
static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
{
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&cxt->mtd->dev, "mark zone %llu removed\n", zonenum);
	set_bit(zonenum, cxt->rmmap);
}
148*4882a593Smuzhiyun
/* Drop the "removed" marks of every zone in the erase block holding @off. */
static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;
	u32 i;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	for (i = 0; i < zonecnt; i++)
		clear_bit(zonenum + i, cxt->rmmap);
}
164*4882a593Smuzhiyun
/* Is any zone in the erase block holding @off marked for lazy erase? */
static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
	u64 zonenum;
	u32 i;

	off = ALIGN_DOWN(off, mtd->erasesize);
	zonenum = div_u64(off, cxt->info.kmsg_size);
	for (i = 0; i < zonecnt; i++) {
		if (test_bit(zonenum + i, cxt->rmmap))
			return true;
	}
	return false;
}
182*4882a593Smuzhiyun
/*
 * Erase the whole erase block containing @off; on success the block's
 * "removed" marks are cleared since the lazy erase is now done.
 */
static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	struct erase_info erase;
	int ret;

	off = ALIGN_DOWN(off, mtd->erasesize);
	dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
	erase.addr = off;
	erase.len = mtd->erasesize;
	ret = mtd_erase(mtd, &erase);
	if (ret) {
		dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
			(unsigned long long)erase.addr,
			(unsigned long long)erase.len, cxt->info.device);
		return ret;
	}

	mtdpstore_block_clear_removed(cxt, off);
	return 0;
}
202*4882a593Smuzhiyun
/*
 * called while removing file
 *
 * Avoiding over erasing, do erase block only when the whole block is unused.
 * If the block contains valid log, do erase lazily on flush_removed() when
 * unregister.
 */
mtdpstore_erase(size_t size,loff_t off)210*4882a593Smuzhiyun static ssize_t mtdpstore_erase(size_t size, loff_t off)
211*4882a593Smuzhiyun {
212*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun if (mtdpstore_block_isbad(cxt, off))
215*4882a593Smuzhiyun return -EIO;
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun mtdpstore_mark_unused(cxt, off);
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun /* If the block still has valid data, mtdpstore do erase lazily */
220*4882a593Smuzhiyun if (likely(mtdpstore_block_is_used(cxt, off))) {
221*4882a593Smuzhiyun mtdpstore_mark_removed(cxt, off);
222*4882a593Smuzhiyun return 0;
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun /* all zones are unused, erase it */
226*4882a593Smuzhiyun return mtdpstore_erase_do(cxt, off);
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
/*
 * What is security for mtdpstore?
 * As there is no erase for panic case, we should ensure at least one zone
 * is writable. Otherwise, panic write will fail.
 * If zone is used, write operation will return -ENOMSG, which means that
 * pstore/blk will try one by one until gets an empty zone. So, it is not
 * needed to ensure the next zone is empty, but at least one.
 */
/*
 * Ensure at least one zone remains writable for a future panic write,
 * erasing one block if every zone is currently occupied.
 *
 * Fix: previously, if every candidate block was bad the erase loop only
 * ever hit "continue", ret stayed 0 and the function reported success
 * even though no writable zone exists; seed ret with -EIO so that case
 * is detected and logged. The loop index is also u32 to match zonecnt.
 */
static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
	u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
	u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
	u32 erasesize = cxt->mtd->erasesize;
	int ret;
	u32 i;

	for (i = 0; i < zonecnt; i++) {
		u32 num = (zonenum + i) % zonecnt;

		/* found empty zone */
		if (!test_bit(num, cxt->usedmap))
			return 0;
	}

	/* If there is no any empty zone, we have no way but to do erase */
	ret = -EIO;	/* stays -EIO if all blocks are bad and none erasable */
	while (blkcnt--) {
		div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);

		if (mtdpstore_block_isbad(cxt, off))
			continue;

		ret = mtdpstore_erase_do(cxt, off);
		if (!ret) {
			mtdpstore_block_mark_unused(cxt, off);
			break;
		}
	}

	if (ret)
		dev_err(&mtd->dev, "all blocks bad!\n");
	dev_dbg(&mtd->dev, "end security\n");
	return ret;
}
273*4882a593Smuzhiyun
mtdpstore_write(const char * buf,size_t size,loff_t off)274*4882a593Smuzhiyun static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
277*4882a593Smuzhiyun struct mtd_info *mtd = cxt->mtd;
278*4882a593Smuzhiyun size_t retlen;
279*4882a593Smuzhiyun int ret;
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun if (mtdpstore_block_isbad(cxt, off))
282*4882a593Smuzhiyun return -ENOMSG;
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun /* zone is used, please try next one */
285*4882a593Smuzhiyun if (mtdpstore_is_used(cxt, off))
286*4882a593Smuzhiyun return -ENOMSG;
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
289*4882a593Smuzhiyun ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
290*4882a593Smuzhiyun if (ret < 0 || retlen != size) {
291*4882a593Smuzhiyun dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
292*4882a593Smuzhiyun off, retlen, size, ret);
293*4882a593Smuzhiyun return -EIO;
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun mtdpstore_mark_used(cxt, off);
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun mtdpstore_security(cxt, off);
298*4882a593Smuzhiyun return retlen;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun
mtdpstore_is_io_error(int ret)301*4882a593Smuzhiyun static inline bool mtdpstore_is_io_error(int ret)
302*4882a593Smuzhiyun {
303*4882a593Smuzhiyun return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun
/*
 * All zones will be read as pstore/blk will read zone one by one when do
 * recover.
 */
mtdpstore_read(char * buf,size_t size,loff_t off)310*4882a593Smuzhiyun static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
311*4882a593Smuzhiyun {
312*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
313*4882a593Smuzhiyun struct mtd_info *mtd = cxt->mtd;
314*4882a593Smuzhiyun size_t retlen, done;
315*4882a593Smuzhiyun int ret;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun if (mtdpstore_block_isbad(cxt, off))
318*4882a593Smuzhiyun return -ENOMSG;
319*4882a593Smuzhiyun
320*4882a593Smuzhiyun dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
321*4882a593Smuzhiyun for (done = 0, retlen = 0; done < size; done += retlen) {
322*4882a593Smuzhiyun retlen = 0;
323*4882a593Smuzhiyun
324*4882a593Smuzhiyun ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
325*4882a593Smuzhiyun (u_char *)buf + done);
326*4882a593Smuzhiyun if (mtdpstore_is_io_error(ret)) {
327*4882a593Smuzhiyun dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
328*4882a593Smuzhiyun off + done, retlen, size - done, ret);
329*4882a593Smuzhiyun /* the zone may be broken, try next one */
330*4882a593Smuzhiyun return -ENOMSG;
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun /*
334*4882a593Smuzhiyun * ECC error. The impact on log data is so small. Maybe we can
335*4882a593Smuzhiyun * still read it and try to understand. So mtdpstore just hands
336*4882a593Smuzhiyun * over what it gets and user can judge whether the data is
337*4882a593Smuzhiyun * valid or not.
338*4882a593Smuzhiyun */
339*4882a593Smuzhiyun if (mtd_is_eccerr(ret)) {
340*4882a593Smuzhiyun dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
341*4882a593Smuzhiyun off + done, retlen, size - done, ret);
342*4882a593Smuzhiyun /* driver may not set retlen when ecc error */
343*4882a593Smuzhiyun retlen = retlen == 0 ? size - done : retlen;
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun
347*4882a593Smuzhiyun if (mtdpstore_is_empty(cxt, buf, size))
348*4882a593Smuzhiyun mtdpstore_mark_unused(cxt, off);
349*4882a593Smuzhiyun else
350*4882a593Smuzhiyun mtdpstore_mark_used(cxt, off);
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun mtdpstore_security(cxt, off);
353*4882a593Smuzhiyun return retlen;
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun
mtdpstore_panic_write(const char * buf,size_t size,loff_t off)356*4882a593Smuzhiyun static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
359*4882a593Smuzhiyun struct mtd_info *mtd = cxt->mtd;
360*4882a593Smuzhiyun size_t retlen;
361*4882a593Smuzhiyun int ret;
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun if (mtdpstore_panic_block_isbad(cxt, off))
364*4882a593Smuzhiyun return -ENOMSG;
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun /* zone is used, please try next one */
367*4882a593Smuzhiyun if (mtdpstore_is_used(cxt, off))
368*4882a593Smuzhiyun return -ENOMSG;
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
371*4882a593Smuzhiyun if (ret < 0 || size != retlen) {
372*4882a593Smuzhiyun dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu read), err %d\n",
373*4882a593Smuzhiyun off, retlen, size, ret);
374*4882a593Smuzhiyun return -EIO;
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun mtdpstore_mark_used(cxt, off);
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun return retlen;
379*4882a593Smuzhiyun }
380*4882a593Smuzhiyun
mtdpstore_notify_add(struct mtd_info * mtd)381*4882a593Smuzhiyun static void mtdpstore_notify_add(struct mtd_info *mtd)
382*4882a593Smuzhiyun {
383*4882a593Smuzhiyun int ret;
384*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
385*4882a593Smuzhiyun struct pstore_blk_config *info = &cxt->info;
386*4882a593Smuzhiyun unsigned long longcnt;
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun if (!strcmp(mtd->name, info->device))
389*4882a593Smuzhiyun cxt->index = mtd->index;
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun if (mtd->index != cxt->index || cxt->index < 0)
392*4882a593Smuzhiyun return;
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun if (mtd->size < info->kmsg_size * 2) {
397*4882a593Smuzhiyun dev_err(&mtd->dev, "MTD partition %d not big enough\n",
398*4882a593Smuzhiyun mtd->index);
399*4882a593Smuzhiyun return;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun /*
402*4882a593Smuzhiyun * kmsg_size must be aligned to 4096 Bytes, which is limited by
403*4882a593Smuzhiyun * psblk. The default value of kmsg_size is 64KB. If kmsg_size
404*4882a593Smuzhiyun * is larger than erasesize, some errors will occur since mtdpsotre
405*4882a593Smuzhiyun * is designed on it.
406*4882a593Smuzhiyun */
407*4882a593Smuzhiyun if (mtd->erasesize < info->kmsg_size) {
408*4882a593Smuzhiyun dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
409*4882a593Smuzhiyun mtd->index);
410*4882a593Smuzhiyun return;
411*4882a593Smuzhiyun }
412*4882a593Smuzhiyun if (unlikely(info->kmsg_size % mtd->writesize)) {
413*4882a593Smuzhiyun dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
414*4882a593Smuzhiyun info->kmsg_size / 1024,
415*4882a593Smuzhiyun mtd->writesize / 1024);
416*4882a593Smuzhiyun return;
417*4882a593Smuzhiyun }
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
420*4882a593Smuzhiyun cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
421*4882a593Smuzhiyun cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
424*4882a593Smuzhiyun cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
425*4882a593Smuzhiyun
426*4882a593Smuzhiyun cxt->dev.total_size = mtd->size;
427*4882a593Smuzhiyun /* just support dmesg right now */
428*4882a593Smuzhiyun cxt->dev.flags = PSTORE_FLAGS_DMESG;
429*4882a593Smuzhiyun cxt->dev.read = mtdpstore_read;
430*4882a593Smuzhiyun cxt->dev.write = mtdpstore_write;
431*4882a593Smuzhiyun cxt->dev.erase = mtdpstore_erase;
432*4882a593Smuzhiyun cxt->dev.panic_write = mtdpstore_panic_write;
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun ret = register_pstore_device(&cxt->dev);
435*4882a593Smuzhiyun if (ret) {
436*4882a593Smuzhiyun dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
437*4882a593Smuzhiyun mtd->index);
438*4882a593Smuzhiyun return;
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun cxt->mtd = mtd;
441*4882a593Smuzhiyun dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun
/*
 * Rewrite one erase block: read it into RAM, erase it, then write back
 * only the zones that still hold valid records.
 *
 * Fix: the write-back loop advanced @off through the block but always
 * passed the start of the cache buffer to mtd_write(), so every used
 * zone was rewritten with the first zone's data. Advance a cursor into
 * the buffer in lockstep with @off.
 */
static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
		loff_t off, size_t size)
{
	struct mtd_info *mtd = cxt->mtd;
	u_char *buf, *pos;
	int ret;
	size_t retlen;
	struct erase_info erase;

	buf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* 1st. read to cache */
	ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
	if (mtdpstore_is_io_error(ret))
		goto free;

	/* 2nd. erase block */
	erase.len = mtd->erasesize;
	erase.addr = off;
	ret = mtd_erase(mtd, &erase);
	if (ret)
		goto free;

	/* 3rd. write back */
	pos = buf;
	while (size) {
		unsigned int zonesize = cxt->info.kmsg_size;

		/* there is valid data on block, write back */
		if (mtdpstore_is_used(cxt, off)) {
			ret = mtd_write(mtd, off, zonesize, &retlen, pos);
			if (ret)
				dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
						off, retlen, zonesize, ret);
		}

		off += zonesize;
		pos += zonesize;
		size -= min_t(unsigned int, zonesize, size);
	}

free:
	kfree(buf);
	return ret;
}
489*4882a593Smuzhiyun
/*
 * What does mtdpstore_flush_removed() do?
 * When user remove any log file on pstore filesystem, mtdpstore should do
 * something to ensure log file removed. If the whole block is no longer used,
 * it's nice to erase the block. However if the block still contains valid log,
 * what mtdpstore can do is to erase and write the valid log back.
 */
mtdpstore_flush_removed(struct mtdpstore_context * cxt)497*4882a593Smuzhiyun static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun struct mtd_info *mtd = cxt->mtd;
500*4882a593Smuzhiyun int ret;
501*4882a593Smuzhiyun loff_t off;
502*4882a593Smuzhiyun u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
503*4882a593Smuzhiyun
504*4882a593Smuzhiyun for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
505*4882a593Smuzhiyun ret = mtdpstore_block_isbad(cxt, off);
506*4882a593Smuzhiyun if (ret)
507*4882a593Smuzhiyun continue;
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun ret = mtdpstore_block_is_removed(cxt, off);
510*4882a593Smuzhiyun if (!ret)
511*4882a593Smuzhiyun continue;
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
514*4882a593Smuzhiyun if (ret)
515*4882a593Smuzhiyun return ret;
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun return 0;
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun
mtdpstore_notify_remove(struct mtd_info * mtd)520*4882a593Smuzhiyun static void mtdpstore_notify_remove(struct mtd_info *mtd)
521*4882a593Smuzhiyun {
522*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun if (mtd->index != cxt->index || cxt->index < 0)
525*4882a593Smuzhiyun return;
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun mtdpstore_flush_removed(cxt);
528*4882a593Smuzhiyun
529*4882a593Smuzhiyun unregister_pstore_device(&cxt->dev);
530*4882a593Smuzhiyun kfree(cxt->badmap);
531*4882a593Smuzhiyun kfree(cxt->usedmap);
532*4882a593Smuzhiyun kfree(cxt->rmmap);
533*4882a593Smuzhiyun cxt->mtd = NULL;
534*4882a593Smuzhiyun cxt->index = -1;
535*4882a593Smuzhiyun }
536*4882a593Smuzhiyun
/* Watch MTD device add/remove so we can attach to the configured one. */
static struct mtd_notifier mtdpstore_notifier = {
	.add = mtdpstore_notify_add,
	.remove = mtdpstore_notify_remove,
};
541*4882a593Smuzhiyun
mtdpstore_init(void)542*4882a593Smuzhiyun static int __init mtdpstore_init(void)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun int ret;
545*4882a593Smuzhiyun struct mtdpstore_context *cxt = &oops_cxt;
546*4882a593Smuzhiyun struct pstore_blk_config *info = &cxt->info;
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun ret = pstore_blk_get_config(info);
549*4882a593Smuzhiyun if (unlikely(ret))
550*4882a593Smuzhiyun return ret;
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun if (strlen(info->device) == 0) {
553*4882a593Smuzhiyun pr_err("mtd device must be supplied (device name is empty)\n");
554*4882a593Smuzhiyun return -EINVAL;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun if (!info->kmsg_size) {
557*4882a593Smuzhiyun pr_err("no backend enabled (kmsg_size is 0)\n");
558*4882a593Smuzhiyun return -EINVAL;
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun /* Setup the MTD device to use */
562*4882a593Smuzhiyun ret = kstrtoint((char *)info->device, 0, &cxt->index);
563*4882a593Smuzhiyun if (ret)
564*4882a593Smuzhiyun cxt->index = -1;
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun register_mtd_user(&mtdpstore_notifier);
567*4882a593Smuzhiyun return 0;
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun module_init(mtdpstore_init);
570*4882a593Smuzhiyun
mtdpstore_exit(void)571*4882a593Smuzhiyun static void __exit mtdpstore_exit(void)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun unregister_mtd_user(&mtdpstore_notifier);
574*4882a593Smuzhiyun }
575*4882a593Smuzhiyun module_exit(mtdpstore_exit);
576*4882a593Smuzhiyun
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
MODULE_DESCRIPTION("MTD backend for pstore/blk");
580