// SPDX-License-Identifier: GPL-2.0-only
/*
 * Swap block device support for MTDs
 * Turns an MTD device into a swap device with block wear leveling
 *
 * Copyright © 2007,2011 Nokia Corporation. All rights reserved.
 *
 * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
 *
 * Based on Richard Purdie's earlier implementation in 2007. Background
 * support and lock-less operation written by Adrian Hunter.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/math64.h>

#define MTDSWAP_PREFIX "mtdswap"

/*
 * The number of free eraseblocks when GC should stop
 */
#define CLEAN_BLOCK_THRESHOLD	20

/*
 * Number of free eraseblocks below which GC can also collect low frag
 * blocks.
 */
#define LOW_FRAG_GC_THRESHOLD	5
/*
 * Wear level cost amortization. We want to do wear leveling in the
 * background without disturbing GC too much. This is done by defining
 * a maximum GC frequency. A frequency value of 6 means that 1/6 of the
 * GC passes will pick an erase block based on the biggest wear
 * difference rather than the biggest dirtiness.
 *
 * The lower freq2 should be chosen so that it makes sure the maximum
 * erase difference will decrease even if a malicious application is
 * deliberately trying to make erase differences large.
 */
#define MAX_ERASE_DIFF		4000
#define COLLECT_NONDIRTY_BASE	MAX_ERASE_DIFF
#define COLLECT_NONDIRTY_FREQ1	6
#define COLLECT_NONDIRTY_FREQ2	4

#define PAGE_UNDEF		UINT_MAX
#define BLOCK_UNDEF		UINT_MAX
#define BLOCK_ERROR		(UINT_MAX - 1)
#define BLOCK_MAX		(UINT_MAX - 2)
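
/*
 * Entries in mtdswap_dev->page_data map a swap page to the block it
 * lives in, or hold one of the sentinels above: BLOCK_UNDEF for an
 * unmapped page and BLOCK_ERROR for a page lost to an I/O error. Any
 * value above BLOCK_MAX is a sentinel, which the "> BLOCK_MAX" and
 * "<= BLOCK_MAX" tests below rely on.
 */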

#define EBLOCK_BAD		(1 << 0)
#define EBLOCK_NOMAGIC		(1 << 1)
#define EBLOCK_BITFLIP		(1 << 2)
#define EBLOCK_FAILED		(1 << 3)
#define EBLOCK_READERR		(1 << 4)
#define EBLOCK_IDX_SHIFT	5
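
/*
 * The low five bits of swap_eb->flags are state bits. During the
 * initial scan the chosen tree index is additionally stashed in the
 * same word above EBLOCK_IDX_SHIFT; see mtdswap_scan_eblks().
 */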

struct swap_eb {
	struct rb_node rb;
	struct rb_root *root;

	unsigned int flags;
	unsigned int active_count;
	unsigned int erase_count;
	unsigned int pad;		/* speeds up pointer decrement */
};

#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
				rb)->erase_count)
#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
				rb)->erase_count)

struct mtdswap_tree {
	struct rb_root root;
	unsigned int count;
};

enum {
	MTDSWAP_CLEAN,
	MTDSWAP_USED,
	MTDSWAP_LOWFRAG,
	MTDSWAP_HIFRAG,
	MTDSWAP_DIRTY,
	MTDSWAP_BITFLIP,
	MTDSWAP_FAILING,
	MTDSWAP_TREE_CNT,
};

struct mtdswap_dev {
	struct mtd_blktrans_dev *mbd_dev;
	struct mtd_info *mtd;
	struct device *dev;

	unsigned int *page_data;
	unsigned int *revmap;

	unsigned int eblks;
	unsigned int spare_eblks;
	unsigned int pages_per_eblk;
	unsigned int max_erase_count;
	struct swap_eb *eb_data;

	struct mtdswap_tree trees[MTDSWAP_TREE_CNT];

	unsigned long long sect_read_count;
	unsigned long long sect_write_count;
	unsigned long long mtd_write_count;
	unsigned long long mtd_read_count;
	unsigned long long discard_count;
	unsigned long long discard_page_count;

	unsigned int curr_write_pos;
	struct swap_eb *curr_write;

	char *page_buf;
	char *oob_buf;
};

struct mtdswap_oobdata {
	__le16 magic;
	__le32 count;
} __packed;

#define MTDSWAP_MAGIC_CLEAN	0x2095
#define MTDSWAP_MAGIC_DIRTY	(MTDSWAP_MAGIC_CLEAN + 1)
#define MTDSWAP_TYPE_CLEAN	0
#define MTDSWAP_TYPE_DIRTY	1
#define MTDSWAP_OOBSIZE		sizeof(struct mtdswap_oobdata)
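
/*
 * Marker layout: the OOB area of an eraseblock's first page carries a
 * full mtdswap_oobdata (clean magic plus erase count). Once the block
 * is taken into use, only the 16-bit dirty magic is written to the
 * second page's OOB. mtdswap_read_markers() reads both OOB areas in
 * one pass to classify a block as clean, dirty or unformatted.
 */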

#define MTDSWAP_ERASE_RETRIES	3 /* Before marking erase block bad */
#define MTDSWAP_IO_RETRIES	3

enum {
	MTDSWAP_SCANNED_CLEAN,
	MTDSWAP_SCANNED_DIRTY,
	MTDSWAP_SCANNED_BITFLIP,
	MTDSWAP_SCANNED_BAD,
};

/*
 * In the worst case mtdswap_writesect() has allocated the last clean
 * page from the current block and is then pre-empted by the GC
 * thread. The thread can consume a full erase block when moving a
 * block.
 */
#define MIN_SPARE_EBLOCKS	2
#define MIN_ERASE_BLOCKS	(MIN_SPARE_EBLOCKS + 1)
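
/*
 * One reading of the worst case above (an interpretation, not a hard
 * spec): one spare block covers the write in flight and one covers a
 * concurrent GC move, so MIN_SPARE_EBLOCKS is 2, and at least one more
 * block is needed to hold any swap data at all.
 */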

#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)

#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)

static char partitions[128] = "";
module_param_string(partitions, partitions, sizeof(partitions), 0444);
MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
		"partitions=\"1,3,5\"");

static unsigned int spare_eblocks = 10;
module_param(spare_eblocks, uint, 0444);
MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
		"garbage collection (default 10%)");

static bool header; /* false */
module_param(header, bool, 0444);
MODULE_PARM_DESC(header,
		"Include builtin swap header (default 0, without header)");

static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);

static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
{
	return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
}

static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int oldidx;
	struct mtdswap_tree *tp;

	if (eb->root) {
		tp = container_of(eb->root, struct mtdswap_tree, root);
		oldidx = tp - &d->trees[0];

		d->trees[oldidx].count--;
		rb_erase(&eb->rb, eb->root);
	}
}

static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
{
	struct rb_node **p, *parent = NULL;
	struct swap_eb *cur;

	p = &root->rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct swap_eb, rb);
		if (eb->erase_count > cur->erase_count)
			p = &(*p)->rb_right;
		else
			p = &(*p)->rb_left;
	}

	rb_link_node(&eb->rb, parent, p);
	rb_insert_color(&eb->rb, root);
}
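
/*
 * The trees are keyed by erase_count, with equal counts going left, so
 * rb_first() always yields the least-worn block and rb_last() the
 * most-worn one; MTDSWAP_ECNT_MIN()/MAX() above depend on this.
 */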

static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
{
	struct rb_root *root;

	if (eb->root == &d->trees[idx].root)
		return;

	mtdswap_eb_detach(d, eb);
	root = &d->trees[idx].root;
	__mtdswap_rb_add(root, eb);
	eb->root = root;
	d->trees[idx].count++;
}

static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
{
	struct rb_node *p;
	unsigned int i;

	p = rb_first(root);
	i = 0;
	while (i < idx && p) {
		p = rb_next(p);
		i++;
	}

	return p;
}

static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	int ret;
	loff_t offset;

	d->spare_eblks--;
	eb->flags |= EBLOCK_BAD;
	mtdswap_eb_detach(d, eb);
	eb->root = NULL;

	/* badblocks not supported */
	if (!mtd_can_have_bb(d->mtd))
		return 1;

	offset = mtdswap_eb_offset(d, eb);
	dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
	ret = mtd_block_markbad(d->mtd, offset);

	if (ret) {
		dev_warn(d->dev, "Mark block bad failed for block at %08llx "
			"error %d\n", offset, ret);
		return ret;
	}

	return 1;
}

static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int marked = eb->flags & EBLOCK_FAILED;
	struct swap_eb *curr_write = d->curr_write;

	eb->flags |= EBLOCK_FAILED;
	if (curr_write == eb) {
		d->curr_write = NULL;

		if (!marked && d->curr_write_pos != 0) {
			mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
			return 0;
		}
	}

	return mtdswap_handle_badblock(d, eb);
}

static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
			struct mtd_oob_ops *ops)
{
	int ret = mtd_read_oob(d->mtd, from, ops);

	if (mtd_is_bitflip(ret))
		return ret;

	if (ret) {
		dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
			ret, from);
		return ret;
	}

	if (ops->oobretlen < ops->ooblen) {
		dev_warn(d->dev, "Read OOB returned short read (%zd bytes not "
			"%zd) for block at %08llx\n",
			ops->oobretlen, ops->ooblen, from);
		return -EIO;
	}

	return 0;
}

static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtdswap_oobdata *data, *data2;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	offset = mtdswap_eb_offset(d, eb);

	/* Check first if the block is bad. */
	if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
		return MTDSWAP_SCANNED_BAD;

	ops.ooblen = 2 * d->mtd->oobavail;
	ops.oobbuf = d->oob_buf;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.mode = MTD_OPS_AUTO_OOB;

	ret = mtdswap_read_oob(d, offset, &ops);

	if (ret && !mtd_is_bitflip(ret))
		return ret;

	data = (struct mtdswap_oobdata *)d->oob_buf;
	data2 = (struct mtdswap_oobdata *)
		(d->oob_buf + d->mtd->oobavail);

	if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
		eb->erase_count = le32_to_cpu(data->count);
		if (mtd_is_bitflip(ret))
			ret = MTDSWAP_SCANNED_BITFLIP;
		else {
			if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
				ret = MTDSWAP_SCANNED_DIRTY;
			else
				ret = MTDSWAP_SCANNED_CLEAN;
		}
	} else {
		eb->flags |= EBLOCK_NOMAGIC;
		ret = MTDSWAP_SCANNED_DIRTY;
	}

	return ret;
}

static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
			u16 marker)
{
	struct mtdswap_oobdata n;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	ops.ooboffs = 0;
	ops.oobbuf = (uint8_t *)&n;
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.datbuf = NULL;

	if (marker == MTDSWAP_TYPE_CLEAN) {
		n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
		n.count = cpu_to_le32(eb->erase_count);
		ops.ooblen = MTDSWAP_OOBSIZE;
		offset = mtdswap_eb_offset(d, eb);
	} else {
		n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
		ops.ooblen = sizeof(n.magic);
		offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
	}

	ret = mtd_write_oob(d->mtd, offset, &ops);

	if (ret) {
		dev_warn(d->dev, "Write OOB failed for block at %08llx "
			"error %d\n", offset, ret);
		if (ret == -EIO || mtd_is_eccerr(ret))
			mtdswap_handle_write_error(d, eb);
		return ret;
	}

	if (ops.oobretlen != ops.ooblen) {
		dev_warn(d->dev, "Short OOB write for block at %08llx: "
			"%zd not %zd\n",
			offset, ops.oobretlen, ops.ooblen);
		return ret;
	}

	return 0;
}

/*
 * Are there any erase blocks without a MAGIC_CLEAN header, presumably
 * because power was cut off after erase but before the header was
 * written? We need to guesstimate the erase count.
 */
static void mtdswap_check_counts(struct mtdswap_dev *d)
{
	struct rb_root hist_root = RB_ROOT;
	struct rb_node *medrb;
	struct swap_eb *eb;
	unsigned int i, cnt, median;

	cnt = 0;
	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		__mtdswap_rb_add(&hist_root, eb);
		cnt++;
	}

	if (cnt == 0)
		return;

	medrb = mtdswap_rb_index(&hist_root, cnt / 2);
	median = rb_entry(medrb, struct swap_eb, rb)->erase_count;

	d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
			eb->erase_count = median;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		rb_erase(&eb->rb, &hist_root);
	}
}

static void mtdswap_scan_eblks(struct mtdswap_dev *d)
{
	int status;
	unsigned int i, idx;
	struct swap_eb *eb;

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		status = mtdswap_read_markers(d, eb);
		if (status < 0)
			eb->flags |= EBLOCK_READERR;
		else if (status == MTDSWAP_SCANNED_BAD) {
			eb->flags |= EBLOCK_BAD;
			continue;
		}

		switch (status) {
		case MTDSWAP_SCANNED_CLEAN:
			idx = MTDSWAP_CLEAN;
			break;
		case MTDSWAP_SCANNED_DIRTY:
		case MTDSWAP_SCANNED_BITFLIP:
			idx = MTDSWAP_DIRTY;
			break;
		default:
			idx = MTDSWAP_FAILING;
		}

		eb->flags |= (idx << EBLOCK_IDX_SHIFT);
	}

	mtdswap_check_counts(d);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & EBLOCK_BAD)
			continue;

		idx = eb->flags >> EBLOCK_IDX_SHIFT;
		mtdswap_rb_add(d, eb, idx);
	}
}

/*
 * Place the eraseblock into the tree corresponding to the number of
 * active pages it contains.
 */
static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int weight = eb->active_count;
	unsigned int maxweight = d->pages_per_eblk;

	if (eb == d->curr_write)
		return;

	if (eb->flags & EBLOCK_BITFLIP)
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
	else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
	else if (weight == maxweight)
		mtdswap_rb_add(d, eb, MTDSWAP_USED);
	else if (weight == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
	else if (weight > (maxweight/2))
		mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
	else
		mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
}

static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	struct erase_info erase;
	unsigned int retries = 0;
	int ret;

	eb->erase_count++;
	if (eb->erase_count > d->max_erase_count)
		d->max_erase_count = eb->erase_count;

retry:
	memset(&erase, 0, sizeof(struct erase_info));
	erase.addr = mtdswap_eb_offset(d, eb);
	erase.len = mtd->erasesize;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		if (retries++ < MTDSWAP_ERASE_RETRIES) {
			dev_warn(d->dev,
				"erase of erase block %#llx on %s failed",
				erase.addr, mtd->name);
			yield();
			goto retry;
		}

		dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
			erase.addr, mtd->name);

		mtdswap_handle_badblock(d, eb);
		return -EIO;
	}

	return 0;
}

static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
				unsigned int *block)
{
	int ret;
	struct swap_eb *old_eb = d->curr_write;
	struct rb_root *clean_root;
	struct swap_eb *eb;

	if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
		do {
			if (TREE_EMPTY(d, CLEAN))
				return -ENOSPC;

			clean_root = TREE_ROOT(d, CLEAN);
			eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
			rb_erase(&eb->rb, clean_root);
			eb->root = NULL;
			TREE_COUNT(d, CLEAN)--;

			ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
		} while (ret == -EIO || mtd_is_eccerr(ret));

		if (ret)
			return ret;

		d->curr_write_pos = 0;
		d->curr_write = eb;
		if (old_eb)
			mtdswap_store_eb(d, old_eb);
	}

	*block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
		d->curr_write_pos;

	d->curr_write->active_count++;
	d->revmap[*block] = page;
	d->curr_write_pos++;

	return 0;
}
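
/*
 * The returned *block is a flat page index: eraseblock index times
 * pages_per_eblk plus the write position within the block. As an
 * illustration (numbers assumed, not from any particular chip): with
 * 128 KiB eraseblocks and 4 KiB pages, pages_per_eblk is 32, so block
 * 70 is page 6 of eraseblock 2, recovered via 70 / 32 and 70 % 32.
 */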

static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
{
	return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
		d->pages_per_eblk - d->curr_write_pos;
}

static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
{
	return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
}
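
/*
 * "Enough" means strictly more than one eraseblock's worth of free
 * pages: GC may need to rewrite a full eraseblock of live pages while
 * moving a block, so mtdswap_write_block() keeps that margin before
 * accepting new data outside of GC context.
 */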

static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
			unsigned int page, unsigned int *bp, int gc_context)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb;
	size_t retlen;
	loff_t writepos;
	int ret;

retry:
	if (!gc_context)
		while (!mtdswap_enough_free_pages(d))
			if (mtdswap_gc(d, 0) > 0)
				return -ENOSPC;

	ret = mtdswap_map_free_block(d, page, bp);
	eb = d->eb_data + (*bp / d->pages_per_eblk);

	if (ret == -EIO || mtd_is_eccerr(ret)) {
		d->curr_write = NULL;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		goto retry;
	}

	if (ret < 0)
		return ret;

	writepos = (loff_t)*bp << PAGE_SHIFT;
	ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
	if (ret == -EIO || mtd_is_eccerr(ret)) {
		d->curr_write_pos--;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		mtdswap_handle_write_error(d, eb);
		goto retry;
	}

	if (ret < 0) {
		dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
			ret, retlen);
		goto err;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short write to MTD device: %zd written",
			retlen);
		ret = -EIO;
		goto err;
	}

	return ret;

err:
	d->curr_write_pos--;
	eb->active_count--;
	d->revmap[*bp] = PAGE_UNDEF;

	return ret;
}

static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
			unsigned int *newblock)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb, *oldeb;
	int ret;
	size_t retlen;
	unsigned int page, retries;
	loff_t readpos;

	page = d->revmap[oldblock];
	readpos = (loff_t) oldblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);

	if (ret < 0 && !mtd_is_bitflip(ret)) {
		oldeb = d->eb_data + oldblock / d->pages_per_eblk;
		oldeb->flags |= EBLOCK_READERR;

		dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
			oldblock);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		goto read_error;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
			oldblock);
		ret = -EIO;
		goto read_error;
	}

	ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
	if (ret < 0) {
		d->page_data[page] = BLOCK_ERROR;
		dev_err(d->dev, "Write error: %d\n", ret);
		return ret;
	}

	d->page_data[page] = *newblock;
	d->revmap[oldblock] = PAGE_UNDEF;
	eb = d->eb_data + oldblock / d->pages_per_eblk;
	eb->active_count--;

	return 0;

read_error:
	d->page_data[page] = BLOCK_ERROR;
	d->revmap[oldblock] = PAGE_UNDEF;
	return ret;
}

static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int i, block, eblk_base, newblock;
	int ret, errcode;

	errcode = 0;
	eblk_base = (eb - d->eb_data) * d->pages_per_eblk;

	for (i = 0; i < d->pages_per_eblk; i++) {
		if (d->spare_eblks < MIN_SPARE_EBLOCKS)
			return -ENOSPC;

		block = eblk_base + i;
		if (d->revmap[block] == PAGE_UNDEF)
			continue;

		ret = mtdswap_move_block(d, block, &newblock);
		if (ret < 0 && !errcode)
			errcode = ret;
	}

	return errcode;
}

static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
	int idx, stopat;

	if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
		stopat = MTDSWAP_LOWFRAG;
	else
		stopat = MTDSWAP_HIFRAG;

	for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
		if (d->trees[idx].root.rb_node != NULL)
			return idx;

	return -1;
}

static int mtdswap_wlfreq(unsigned int maxdiff)
{
	unsigned int h, x, y, dist, base;

	/*
	 * Calculate linear ramp down from f1 to f2 when maxdiff goes from
	 * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
	 * to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
	 */

	dist = maxdiff - MAX_ERASE_DIFF;
	if (dist > COLLECT_NONDIRTY_BASE)
		dist = COLLECT_NONDIRTY_BASE;

	/*
	 * Modelling the slope as a right-angled triangle with base
	 * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
	 * equal to the ratio h/base.
	 */
	h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
	base = COLLECT_NONDIRTY_BASE;

	x = dist - base;
	y = (x * h + base / 2) / base;

	return COLLECT_NONDIRTY_FREQ2 + y;
}
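
/*
 * Worked example with the defaults above: once maxdiff reaches
 * MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE (8000), dist saturates at
 * COLLECT_NONDIRTY_BASE, x becomes 0 and the function returns
 * COLLECT_NONDIRTY_FREQ2 (4), i.e. every 4th GC pass picks its victim
 * by wear difference instead of dirtiness.
 */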

static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
{
	static unsigned int pick_cnt;
	unsigned int i, idx = -1, wear, max;
	struct rb_root *root;

	max = 0;
	for (i = 0; i <= MTDSWAP_DIRTY; i++) {
		root = &d->trees[i].root;
		if (root->rb_node == NULL)
			continue;

		wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
		if (wear > max) {
			max = wear;
			idx = i;
		}
	}

	if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
		pick_cnt = 0;
		return idx;
	}

	pick_cnt++;
	return -1;
}

static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
				unsigned int background)
{
	int idx;

	if (TREE_NONEMPTY(d, FAILING) &&
		(background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
		return MTDSWAP_FAILING;

	idx = mtdswap_choose_wl_tree(d);
	if (idx >= MTDSWAP_CLEAN)
		return idx;

	return __mtdswap_choose_gc_tree(d);
}

static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
					unsigned int background)
{
	struct rb_root *rp = NULL;
	struct swap_eb *eb = NULL;
	int idx;

	if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
		TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
		return NULL;

	idx = mtdswap_choose_gc_tree(d, background);
	if (idx < 0)
		return NULL;

	rp = &d->trees[idx].root;
	eb = rb_entry(rb_first(rp), struct swap_eb, rb);

	rb_erase(&eb->rb, rp);
	eb->root = NULL;
	d->trees[idx].count--;
	return eb;
}

static unsigned int mtdswap_test_patt(unsigned int i)
{
	return i % 2 ? 0x55555555 : 0xAAAAAAAA;
}
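
/*
 * Alternating 0x55 (01010101b) and 0xAA (10101010b) between passes
 * toggles every bit, so the two write/read/erase rounds in
 * mtdswap_eblk_passes() below exercise both polarities of each cell.
 */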

static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
					struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	unsigned int test, i, j, patt, mtd_pages;
	loff_t base, pos;
	unsigned int *p1 = (unsigned int *)d->page_buf;
	unsigned char *p2 = (unsigned char *)d->oob_buf;
	struct mtd_oob_ops ops;
	int ret;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = mtd->writesize;
	ops.ooblen = mtd->oobavail;
	ops.ooboffs = 0;
	ops.datbuf = d->page_buf;
	ops.oobbuf = d->oob_buf;
	base = mtdswap_eb_offset(d, eb);
	mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;

	for (test = 0; test < 2; test++) {
		pos = base;
		for (i = 0; i < mtd_pages; i++) {
			patt = mtdswap_test_patt(test + i);
			memset(d->page_buf, patt, mtd->writesize);
			memset(d->oob_buf, patt, mtd->oobavail);
			ret = mtd_write_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			pos += mtd->writesize;
		}

		pos = base;
		for (i = 0; i < mtd_pages; i++) {
			ret = mtd_read_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			patt = mtdswap_test_patt(test + i);
			for (j = 0; j < mtd->writesize/sizeof(int); j++)
				if (p1[j] != patt)
					goto error;

			for (j = 0; j < mtd->oobavail; j++)
				if (p2[j] != (unsigned char)patt)
					goto error;

			pos += mtd->writesize;
		}

		ret = mtdswap_erase_block(d, eb);
		if (ret)
			goto error;
	}

	eb->flags &= ~EBLOCK_READERR;
	return 1;

error:
	mtdswap_handle_badblock(d, eb);
	return 0;
}

static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
{
	struct swap_eb *eb;
	int ret;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return 1;

	eb = mtdswap_pick_gc_eblk(d, background);
	if (!eb)
		return 1;

	ret = mtdswap_gc_eblock(d, eb);
	if (ret == -ENOSPC)
		return 1;

	if (eb->flags & EBLOCK_FAILED) {
		mtdswap_handle_badblock(d, eb);
		return 0;
	}

	eb->flags &= ~EBLOCK_BITFLIP;
	ret = mtdswap_erase_block(d, eb);
	if ((eb->flags & EBLOCK_READERR) &&
		(ret || !mtdswap_eblk_passes(d, eb)))
		return 0;

	if (ret == 0)
		ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);

	if (ret == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
	else if (ret != -EIO && !mtd_is_eccerr(ret))
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);

	return 0;
}

static void mtdswap_background(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	int ret;

	while (1) {
		ret = mtdswap_gc(d, 1);
		if (ret || mtd_blktrans_cease_background(dev))
			return;
	}
}

static void mtdswap_cleanup(struct mtdswap_dev *d)
{
	vfree(d->eb_data);
	vfree(d->revmap);
	vfree(d->page_data);
	kfree(d->oob_buf);
	kfree(d->page_buf);
}

static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	mtd_sync(d->mtd);
	return 0;
}

static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
{
	loff_t offset;
	unsigned int badcnt;

	badcnt = 0;

	if (mtd_can_have_bb(mtd))
		for (offset = 0; offset < size; offset += mtd->erasesize)
			if (mtd_block_isbad(mtd, offset))
				badcnt++;

	return badcnt;
}

static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned int newblock, mapped;
	struct swap_eb *eb;
	int ret;

	d->sect_write_count++;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return -ENOSPC;

	if (header) {
		/* Ignore writes to the header page */
		if (unlikely(page == 0))
			return 0;

		page--;
	}

	mapped = d->page_data[page];
	if (mapped <= BLOCK_MAX) {
		eb = d->eb_data + (mapped / d->pages_per_eblk);
		eb->active_count--;
		mtdswap_store_eb(d, eb);
		d->page_data[page] = BLOCK_UNDEF;
		d->revmap[mapped] = PAGE_UNDEF;
	}

	ret = mtdswap_write_block(d, buf, page, &newblock, 0);
	d->mtd_write_count++;

	if (ret < 0)
		return ret;

	d->page_data[page] = newblock;

	return 0;
}

/* Provide a dummy swap header for the kernel */
static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
{
	union swap_header *hd = (union swap_header *)(buf);

	memset(buf, 0, PAGE_SIZE - 10);

	hd->info.version = 1;
	hd->info.last_page = d->mbd_dev->size - 1;
	hd->info.nr_badpages = 0;

	memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);

	return 0;
}
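
/*
 * This produces the minimal layout mkswap would write: a zeroed page
 * with version 1 and last_page filled in, plus the "SWAPSPACE2"
 * signature in the last ten bytes, so the device can be handed to
 * swapon without running mkswap first.
 */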

static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	struct mtd_info *mtd = d->mtd;
	unsigned int realblock, retries;
	loff_t readpos;
	struct swap_eb *eb;
	size_t retlen;
	int ret;

	d->sect_read_count++;

	if (header) {
		if (unlikely(page == 0))
			return mtdswap_auto_header(d, buf);

		page--;
	}

	realblock = d->page_data[page];
	if (realblock > BLOCK_MAX) {
		memset(buf, 0x0, PAGE_SIZE);
		if (realblock == BLOCK_UNDEF)
			return 0;
		else
			return -EIO;
	}

	eb = d->eb_data + (realblock / d->pages_per_eblk);
	BUG_ON(d->revmap[realblock] == PAGE_UNDEF);

	readpos = (loff_t)realblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);

	d->mtd_read_count++;
	if (mtd_is_bitflip(ret)) {
		eb->flags |= EBLOCK_BITFLIP;
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
		ret = 0;
	}

	if (ret < 0) {
		dev_err(d->dev, "Read error %d\n", ret);
		eb->flags |= EBLOCK_READERR;
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		return ret;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read %zd\n", retlen);
		return -EIO;
	}

	return 0;
}

static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
			unsigned nr_pages)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned long page;
	struct swap_eb *eb;
	unsigned int mapped;

	d->discard_count++;

	for (page = first; page < first + nr_pages; page++) {
		mapped = d->page_data[page];
		if (mapped <= BLOCK_MAX) {
			eb = d->eb_data + (mapped / d->pages_per_eblk);
			eb->active_count--;
			mtdswap_store_eb(d, eb);
			d->page_data[page] = BLOCK_UNDEF;
			d->revmap[mapped] = PAGE_UNDEF;
			d->discard_page_count++;
		} else if (mapped == BLOCK_ERROR) {
			d->page_data[page] = BLOCK_UNDEF;
			d->discard_page_count++;
		}
	}

	return 0;
}

static int mtdswap_show(struct seq_file *s, void *data)
{
	struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
	unsigned long sum;
	unsigned int count[MTDSWAP_TREE_CNT];
	unsigned int min[MTDSWAP_TREE_CNT];
	unsigned int max[MTDSWAP_TREE_CNT];
	unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
	uint64_t use_size;
	static const char * const name[] = {
		"clean", "used", "low", "high", "dirty", "bitflip", "failing"
	};

	mutex_lock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		struct rb_root *root = &d->trees[i].root;

		if (root->rb_node) {
			count[i] = d->trees[i].count;
			min[i] = MTDSWAP_ECNT_MIN(root);
			max[i] = MTDSWAP_ECNT_MAX(root);
		} else
			count[i] = 0;
	}

	if (d->curr_write) {
		cw = 1;
		cwp = d->curr_write_pos;
		cwecount = d->curr_write->erase_count;
	}

	sum = 0;
	for (i = 0; i < d->eblks; i++)
		sum += d->eb_data[i].erase_count;

	use_size = (uint64_t)d->eblks * d->mtd->erasesize;
	bb_cnt = mtdswap_badblocks(d->mtd, use_size);

	mapped = 0;
	pages = d->mbd_dev->size;
	for (i = 0; i < pages; i++)
		if (d->page_data[i] != BLOCK_UNDEF)
			mapped++;

	mutex_unlock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		if (!count[i])
			continue;

		if (min[i] != max[i])
			seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
				"max %d times\n",
				name[i], count[i], min[i], max[i]);
		else
			seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
				"times\n", name[i], count[i], min[i]);
	}

	if (bb_cnt)
		seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);

	if (cw)
		seq_printf(s, "current erase block: %u pages used, %u free, "
			"erased %u times\n",
			cwp, d->pages_per_eblk - cwp, cwecount);

	seq_printf(s, "total erasures: %lu\n", sum);

	seq_puts(s, "\n");

	seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
	seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
	seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
	seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
	seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
	seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);

	seq_puts(s, "\n");
	seq_printf(s, "total pages: %u\n", pages);
	seq_printf(s, "pages mapped: %u\n", mapped);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(mtdswap);
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
	struct dentry *root = d->mtd->dbg.dfs_dir;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	if (IS_ERR_OR_NULL(root))
		return -1;

	debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);

	return 0;
}

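/*
 * Allocate and initialize the logical<->physical page maps:
 * page_data[] maps a swap page to the eraseblock page holding it
 * (BLOCK_UNDEF when unmapped), and revmap[] maps an eraseblock page
 * back to the swap page stored there (PAGE_UNDEF when free). Both
 * start fully unmapped; mtdswap_scan_eblks() then classifies the
 * eraseblocks into the wear/dirtiness trees.
 */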
static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
			unsigned int spare_cnt)
{
	struct mtd_info *mtd = d->mbd_dev->mtd;
	unsigned int i, eblk_bytes, pages, blocks;
	int ret = -ENOMEM;

	d->mtd = mtd;
	d->eblks = eblocks;
	d->spare_eblks = spare_cnt;
	d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;

	pages = d->mbd_dev->size;
	blocks = eblocks * d->pages_per_eblk;

	for (i = 0; i < MTDSWAP_TREE_CNT; i++)
		d->trees[i].root = RB_ROOT;

	d->page_data = vmalloc(array_size(pages, sizeof(int)));
	if (!d->page_data)
		goto page_data_fail;

	d->revmap = vmalloc(array_size(blocks, sizeof(int)));
	if (!d->revmap)
		goto revmap_fail;

	eblk_bytes = sizeof(struct swap_eb) * d->eblks;
	d->eb_data = vzalloc(eblk_bytes);
	if (!d->eb_data)
		goto eb_data_fail;

	for (i = 0; i < pages; i++)
		d->page_data[i] = BLOCK_UNDEF;

	for (i = 0; i < blocks; i++)
		d->revmap[i] = PAGE_UNDEF;
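	/*
	 * Scratch buffers: a page-sized bounce buffer for sector I/O,
	 * and room for two pages' worth of free OOB bytes (assumption
	 * based on the kmalloc_array(2, ...) sizing below).
	 */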
	d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!d->page_buf)
		goto page_buf_fail;

	d->oob_buf = kmalloc_array(2, mtd->oobavail, GFP_KERNEL);
	if (!d->oob_buf)
		goto oob_buf_fail;

	mtdswap_scan_eblks(d);

	return 0;

oob_buf_fail:
	kfree(d->page_buf);
page_buf_fail:
	vfree(d->eb_data);
eb_data_fail:
	vfree(d->revmap);
revmap_fail:
	vfree(d->page_data);
page_data_fail:
	printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
	return ret;
}

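/*
 * Attach to an MTD device if its index is listed in the "partitions"
 * module parameter. Typical usage (device numbers and values here are
 * illustrative):
 *
 *   modprobe mtdswap partitions=0,3 spare_eblocks=10 header=1
 *
 * spare_eblocks is a percentage of the available eraseblocks kept in
 * reserve (clamped to 100 below); header, as suggested by the
 * swap_size computation, selects whether the driver exposes an extra
 * page for a builtin swap header.
 */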
static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdswap_dev *d;
	struct mtd_blktrans_dev *mbd_dev;
	char *parts;
	char *this_opt;
	unsigned long part;
	unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
	uint64_t swap_size, use_size, size_limit;
	int ret;

	parts = &partitions[0];
	if (!*parts)
		return;

	while ((this_opt = strsep(&parts, ",")) != NULL) {
		if (kstrtoul(this_opt, 0, &part) < 0)
			return;

		if (mtd->index == part)
			break;
	}

	if (mtd->index != part)
		return;

	if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
		printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE %lu\n",
		       MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
		return;
	}

	if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
		printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size %u\n",
		       MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
		return;
	}

	if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
		printk(KERN_ERR "%s: Not enough free bytes in OOB, %d available, %zu needed.\n",
		       MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
		return;
	}

	if (spare_eblocks > 100)
		spare_eblocks = 100;

	use_size = mtd->size;
	size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;

	if (mtd->size > size_limit) {
		printk(KERN_WARNING "%s: Device too large. Limiting size to %llu bytes\n",
		       MTDSWAP_PREFIX, size_limit);
		use_size = size_limit;
	}

	eblocks = mtd_div_by_eb(use_size, mtd);
	use_size = (uint64_t)eblocks * mtd->erasesize;
	bad_blocks = mtdswap_badblocks(mtd, use_size);
	eavailable = eblocks - bad_blocks;

	if (eavailable < MIN_ERASE_BLOCKS) {
		printk(KERN_ERR "%s: Not enough erase blocks. %u available, %d needed\n",
		       MTDSWAP_PREFIX, eavailable, MIN_ERASE_BLOCKS);
		return;
	}

	spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);

	if (spare_cnt < MIN_SPARE_EBLOCKS)
		spare_cnt = MIN_SPARE_EBLOCKS;

	if (spare_cnt > eavailable - 1)
		spare_cnt = eavailable - 1;

	swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
		(header ? PAGE_SIZE : 0);
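	/*
	 * Example (illustrative numbers): with 2048 available
	 * eraseblocks, spare_eblocks=10 and a 128 KiB erasesize,
	 * spare_cnt = 204 and swap_size = 1844 * 128 KiB = 236032 KiB
	 * exposed to the block layer, plus one page when header is set.
	 */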

	printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, %u spare, %u bad blocks\n",
	       MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);

	d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
	if (!d)
		return;

	mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!mbd_dev) {
		kfree(d);
		return;
	}

	d->mbd_dev = mbd_dev;
	mbd_dev->priv = d;

	mbd_dev->mtd = mtd;
	mbd_dev->devnum = mtd->index;
	mbd_dev->size = swap_size >> PAGE_SHIFT;
	mbd_dev->tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		mbd_dev->readonly = 1;

	if (mtdswap_init(d, eblocks, spare_cnt) < 0)
		goto init_failed;

	if (add_mtd_blktrans_dev(mbd_dev) < 0)
		goto cleanup;

	d->dev = disk_to_dev(mbd_dev->disk);

	ret = mtdswap_add_debugfs(d);
	if (ret < 0)
		goto debugfs_failed;

	return;

debugfs_failed:
	del_mtd_blktrans_dev(mbd_dev);

cleanup:
	mtdswap_cleanup(d);

init_failed:
	kfree(mbd_dev);
	kfree(d);
}

static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	del_mtd_blktrans_dev(dev);
	mtdswap_cleanup(d);
	kfree(d);
}

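/*
 * blksize == PAGE_SIZE means one blktrans sector corresponds to one
 * swap page, so readsect/writesect operate on whole swap pages.
 */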
static struct mtd_blktrans_ops mtdswap_ops = {
	.name		= "mtdswap",
	.major		= 0,
	.part_bits	= 0,
	.blksize	= PAGE_SIZE,
	.flush		= mtdswap_flush,
	.readsect	= mtdswap_readsect,
	.writesect	= mtdswap_writesect,
	.discard	= mtdswap_discard,
	.background	= mtdswap_background,
	.add_mtd	= mtdswap_add_mtd,
	.remove_dev	= mtdswap_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init mtdswap_modinit(void)
{
	return register_mtd_blktrans(&mtdswap_ops);
}

static void __exit mtdswap_modexit(void)
{
	deregister_mtd_blktrans(&mtdswap_ops);
}

module_init(mtdswap_modinit);
module_exit(mtdswap_modexit);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
		"swap space");
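
/*
 * Typical usage once the module is loaded (device name follows the
 * blktrans name above; N is the MTD device number, assumed here):
 *
 *   mkswap /dev/mtdswapN
 *   swapon /dev/mtdswapN
 *
 * With header=1 the driver is expected to provide the swap signature
 * page itself, in which case running mkswap should not be needed.
 */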