// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
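 *
 * For example, if %UBI_PROT_QUEUE_LEN were 10, a PEB handed out by
 * 'ubi_wl_get_peb()' would stay in @wl->pq for roughly the next 10
 * (global) erase operations before 'serve_prot_queue()' moves it to the
 * @wl->used tree and it becomes eligible for wear-leveling again.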
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This
 * leaves room for future re-works of the WL sub-system.
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
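
/*
 * For example, assuming the default CONFIG_MTD_UBI_WL_THRESHOLD of 4096,
 * WL_FREE_MAX_DIFF is 8192: if the least worn free PEB has an erase counter
 * of 1000, free PEBs with erase counters of 9192 or more are never picked
 * as wear-leveling targets.
 */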

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
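
/*
 * For example, entries with (EC, PEB) pairs (2, 5), (2, 7) and (3, 1) end up
 * ordered exactly like that in the tree: the erase counter is the primary
 * key and the PEB number breaks ties, so no two entries ever compare equal.
 */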

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes @ubi->work_sem in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 1;

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
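
/*
 * For example, when @ubi->pq_head is 0, the tail above wraps around to list
 * UBI_PROT_QUEUE_LEN - 1, so @ubi->pq behaves as a circular buffer of
 * UBI_PROT_QUEUE_LEN lists which 'serve_prot_queue()' drains one list per
 * erase operation.
 */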

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
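
/*
 * For example, with free PEBs of erase counters 10, 15, 20 and 40 and a
 * @diff of 20, the search returns the entry with EC 20: the greatest erase
 * counter that is still below min + @diff = 30.
 */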

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but strictly less than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: set to true if the caller already holds ubi->work_sem in read mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture, bool nested)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	if (nested)
		__schedule_ubi_work(ubi, wl_wrk);
	else
		schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 *            because the WL sub-system is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int dst_leb_clean = 0;

	kfree(wrk);
	if (shutdown)
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	down_read(&ubi->fm_eba_sem);
	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	e1 = find_anchor_wl_entry(&ubi->used);
	if (e1 && ubi->fm_anchor &&
	    (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
		ubi->fm_do_produce_anchor = 1;
		/*
		 * fm_anchor is no longer considered a good anchor.
		 * NULL assignment also prevents multiple wear level checks
		 * of this PEB.
		 */
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->fm_anchor = NULL;
		ubi->free_count++;
	}

	if (ubi->fm_do_produce_anchor) {
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
		ubi->fm_do_produce_anchor = 0;
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		dst_leb_clean = 1;
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it has not
			 * yet had a chance to write it, because it was
			 * preempted. So add this PEB to the protection queue
			 * so far, because presumably more data will be written
			 * there (including the missing VID header), and then
			 * we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
			/*
			 * While a full scan would detect interrupted erasures
			 * at attach time we can face them here when attached
			 * from Fastmap.
			 */
			dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
			       e1->pnum);
			erase = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			keep = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			dst_leb_clean = 1;
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_buf(vidb);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

	/*
	 * For some reason the LEB was not moved: it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else if (keep)
		wl_tree_add(e1, &ubi->used);
	if (dst_leb_clean) {
		wl_tree_add(e2, &ubi->free);
		ubi->free_count++;
	}

	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	if (dst_leb_clean) {
		ensure_wear_leveling(ubi, 1);
	} else {
		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
		if (err)
			goto out_ro;
	}

	if (erase) {
		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
		if (err)
			goto out_ro;
	}

	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_free_vid_buf(vidb);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
	else
		schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}
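
/*
 * For example, assuming the default UBI_WL_THRESHOLD of 4096: if the least
 * worn used PEB has EC 100 and the free PEB picked within WL_FREE_MAX_DIFF
 * has EC 4300, the difference of 4200 exceeds the threshold and a
 * wear-leveling work is scheduled.
 */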
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun /**
1063*4882a593Smuzhiyun * __erase_worker - physical eraseblock erase worker function.
1064*4882a593Smuzhiyun * @ubi: UBI device description object
1065*4882a593Smuzhiyun * @wl_wrk: the work object
1066*4882a593Smuzhiyun * @shutdown: non-zero if the worker has to free memory and exit
1067*4882a593Smuzhiyun * because the WL sub-system is shutting down
1068*4882a593Smuzhiyun *
1069*4882a593Smuzhiyun * This function erases a physical eraseblock and perform torture testing if
1070*4882a593Smuzhiyun * needed. It also takes care about marking the physical eraseblock bad if
1071*4882a593Smuzhiyun * needed. Returns zero in case of success and a negative error code in case of
1072*4882a593Smuzhiyun * failure.
1073*4882a593Smuzhiyun */
1074*4882a593Smuzhiyun static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
1075*4882a593Smuzhiyun {
1076*4882a593Smuzhiyun struct ubi_wl_entry *e = wl_wrk->e;
1077*4882a593Smuzhiyun int pnum = e->pnum;
1078*4882a593Smuzhiyun int vol_id = wl_wrk->vol_id;
1079*4882a593Smuzhiyun int lnum = wl_wrk->lnum;
1080*4882a593Smuzhiyun int err, available_consumed = 0;
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun dbg_wl("erase PEB %d EC %d LEB %d:%d",
1083*4882a593Smuzhiyun pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1084*4882a593Smuzhiyun
1085*4882a593Smuzhiyun err = sync_erase(ubi, e, wl_wrk->torture);
1086*4882a593Smuzhiyun if (!err) {
1087*4882a593Smuzhiyun spin_lock(&ubi->wl_lock);
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun if (!ubi->fm_disabled && !ubi->fm_anchor &&
1090*4882a593Smuzhiyun e->pnum < UBI_FM_MAX_START) {
1091*4882a593Smuzhiyun /*
1092*4882a593Smuzhiyun * Abort anchor production, if needed it will be
1093*4882a593Smuzhiyun * enabled again in the wear leveling started below.
1094*4882a593Smuzhiyun */
1095*4882a593Smuzhiyun ubi->fm_anchor = e;
1096*4882a593Smuzhiyun ubi->fm_do_produce_anchor = 0;
1097*4882a593Smuzhiyun } else {
1098*4882a593Smuzhiyun wl_tree_add(e, &ubi->free);
1099*4882a593Smuzhiyun ubi->free_count++;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun spin_unlock(&ubi->wl_lock);
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun /*
1105*4882a593Smuzhiyun * One more erase operation has happened, take care about
1106*4882a593Smuzhiyun * protected physical eraseblocks.
1107*4882a593Smuzhiyun */
1108*4882a593Smuzhiyun serve_prot_queue(ubi);
1109*4882a593Smuzhiyun
1110*4882a593Smuzhiyun /* And take care about wear-leveling */
1111*4882a593Smuzhiyun err = ensure_wear_leveling(ubi, 1);
1112*4882a593Smuzhiyun return err;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1118*4882a593Smuzhiyun err == -EBUSY) {
1119*4882a593Smuzhiyun int err1;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun /* Re-schedule the LEB for erasure */
1122*4882a593Smuzhiyun err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1123*4882a593Smuzhiyun if (err1) {
1124*4882a593Smuzhiyun wl_entry_destroy(ubi, e);
1125*4882a593Smuzhiyun err = err1;
1126*4882a593Smuzhiyun goto out_ro;
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun return err;
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun wl_entry_destroy(ubi, e);
1132*4882a593Smuzhiyun if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs == 0) {
		if (ubi->avail_pebs == 0) {
			spin_unlock(&ubi->volumes_lock);
			ubi_err(ubi, "no reserved/available physical eraseblocks");
			goto out_ro;
		}
		ubi->avail_pebs -= 1;
		available_consumed = 1;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg(ubi, "mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The number of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg(ubi, "%d PEBs left in the reserve",
			ubi->beb_rsvd_pebs);
	else
		ubi_warn(ubi, "last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}

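/**
 * erase_worker - physical eraseblock erase worker wrapper.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL sub-system is shutting down
 *
 * This wrapper frees @wl_wrk in all cases. On shutdown it only releases the
 * wear-leveling entry; otherwise it delegates the actual erasure to
 * '__erase_worker()'. Returns zero in case of success and a negative error
 * code in case of failure.
 */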
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
{
	int ret;

	if (shutdown) {
		struct ubi_wl_entry *e = wl_wrk->e;

		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	ret = __erase_worker(ubi, wl_wrk);
	kfree(wl_wrk);
	return ret;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	down_read(&ubi->fm_protect);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target of a data move. This may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not yet put the PEB to the "used"
		 * tree, although it is about to. So we just set a flag which
		 * tells the WL worker that the PEB is not needed anymore and
		 * should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_protect);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err(ubi, "PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				up_read(&ubi->fm_protect);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	up_read(&ubi->fm_protect);
	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing, which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
				   in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err(ubi, "PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi, 0);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume ID to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume ID /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err = 0;
	int found = 1;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
	       vol_id, lnum, ubi->works_count);

	while (found) {
		struct ubi_work *wrk, *tmp;
		found = 0;

		down_read(&ubi->work_sem);
		spin_lock(&ubi->wl_lock);
		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
				list_del(&wrk->list);
				ubi->works_count -= 1;
				ubi_assert(ubi->works_count >= 0);
				spin_unlock(&ubi->wl_lock);

				err = wrk->func(ubi, wrk, 0);
				if (err) {
					up_read(&ubi->work_sem);
					return err;
				}

				spin_lock(&ubi->wl_lock);
				found = 1;
				break;
			}
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	return err;
}
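
/*
 * Example (a sketch): draining every pending work, e.g. before detaching a
 * device, uses the %UBI_ALL wildcards documented above:
 *
 *	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
 *	if (err)
 *		return err;
 */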
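
/**
 * scrub_possible - check whether scrubbing is possible for a PEB.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * Returns %false if @e is already scheduled for scrubbing, marked erroneous,
 * or involved in an ongoing data move, and %true otherwise.
 */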
static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	if (in_wl_tree(e, &ubi->scrub))
		return false;
	else if (in_wl_tree(e, &ubi->erroneous))
		return false;
	else if (ubi->move_from == e)
		return false;
	else if (ubi->move_to == e)
		return false;

	return true;
}

/**
 * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 * @force: don't read the block, assume bitflips happened and take action.
 *
 * This function reads the given eraseblock and checks if bitflips occurred.
 * In case of bitflips, the eraseblock is scheduled for scrubbing.
 * If scrubbing is forced with @force, the eraseblock is not read,
 * but scheduled for scrubbing right away.
 *
 * Returns:
 * %-EINVAL, PEB is out of range
 * %-ENOENT, PEB is no longer used by UBI
 * %-EBUSY, PEB cannot be checked now or a check is currently running on it
 * %-EAGAIN, bit flips happened but scrubbing is currently not possible
 * %-EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
 * %0, no bit flips detected
 */
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
{
	int err = 0;
	struct ubi_wl_entry *e;

	if (pnum < 0 || pnum >= ubi->peb_count) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Pause all parallel work, otherwise it can happen that the
	 * erase worker frees a wl entry under us.
	 */
	down_write(&ubi->work_sem);

	/*
	 * Make sure that the wl entry does not change state while
	 * inspecting it.
	 */
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (!e) {
		spin_unlock(&ubi->wl_lock);
		err = -ENOENT;
		goto out_resume;
	}

	/*
	 * Does it make sense to check this PEB?
	 */
	if (!scrub_possible(ubi, e)) {
		spin_unlock(&ubi->wl_lock);
		err = -EBUSY;
		goto out_resume;
	}
	spin_unlock(&ubi->wl_lock);

	if (!force) {
		mutex_lock(&ubi->buf_mutex);
		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
		mutex_unlock(&ubi->buf_mutex);
	}

	if (force || err == UBI_IO_BITFLIPS) {
		/*
		 * Okay, bit flip happened, let's figure out what we can do.
		 */
		spin_lock(&ubi->wl_lock);

		/*
		 * Recheck. We released wl_lock, UBI might have killed the
		 * wl entry under us.
		 */
		e = ubi->lookuptbl[pnum];
		if (!e) {
			spin_unlock(&ubi->wl_lock);
			err = -ENOENT;
			goto out_resume;
		}

		/*
		 * Need to re-check state
		 */
		if (!scrub_possible(ubi, e)) {
			spin_unlock(&ubi->wl_lock);
			err = -EBUSY;
			goto out_resume;
		}

		if (in_pq(ubi, e)) {
			prot_queue_del(ubi, e->pnum);
			wl_tree_add(e, &ubi->scrub);
			spin_unlock(&ubi->wl_lock);

			err = ensure_wear_leveling(ubi, 1);
		} else if (in_wl_tree(e, &ubi->used)) {
			rb_erase(&e->u.rb, &ubi->used);
			wl_tree_add(e, &ubi->scrub);
			spin_unlock(&ubi->wl_lock);

			err = ensure_wear_leveling(ubi, 1);
		} else if (in_wl_tree(e, &ubi->free)) {
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;
			spin_unlock(&ubi->wl_lock);

			/*
			 * This PEB is empty; we can schedule it for erasure
			 * right away. No wear-leveling needed.
			 */
			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
					     force ? 0 : 1, true);
		} else {
			spin_unlock(&ubi->wl_lock);
			err = -EAGAIN;
		}

		if (!err && !force)
			err = -EUCLEAN;
	} else {
		err = 0;
	}

out_resume:
	up_write(&ubi->work_sem);
out:
	return err;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			wl_entry_destroy(ubi, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);

			/*
			 * Check kthread_should_stop() after we set the task
			 * state to guarantee that we either see the stop bit
			 * and exit or the task state is reset to runnable such
			 * that it's not scheduled out indefinitely and detects
			 * the stop bit at kthread_should_stop().
			 */
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg(ubi, "%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	ubi->thread_enabled = 0;
	return 0;
}

/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * erase_aeb - erase a PEB described by a UBI attach info entry
 * @ubi: UBI device description object
 * @aeb: UBI attach info PEB
 * @sync: if true, erase synchronously; otherwise, schedule it for erasure
 */
static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
{
	struct ubi_wl_entry *e;
	int err;

	e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->pnum = aeb->pnum;
	e->ec = aeb->ec;
	ubi->lookuptbl[e->pnum] = e;

	if (sync) {
		err = sync_erase(ubi, e, false);
		if (err)
			goto out_free;

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	} else {
		err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
		if (err)
			goto out_free;
	}

	return 0;

out_free:
	wl_entry_destroy(ubi, e);

	return err;
}

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	ubi->free_count = 0;
	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		err = erase_aeb(ubi, aeb, false);
		if (err)
			goto out_free;

		found_pebs++;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			err = -ENOMEM;
			goto out_free;
		}

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e) {
				err = -ENOMEM;
				goto out_free;
			}

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		cond_resched();

		e = ubi_find_fm_block(ubi, aeb->pnum);

		if (e) {
			ubi_assert(!ubi->lookuptbl[e->pnum]);
			ubi->lookuptbl[e->pnum] = e;
		} else {
			bool sync = false;

			/*
			 * Usually old Fastmap PEBs are scheduled for erasure
			 * and we don't have to care about them, but if we face
			 * a power cut before scheduling them we need to take
			 * care of them here.
			 */
			if (ubi->lookuptbl[aeb->pnum])
				continue;

			/*
			 * The fastmap update code might not find a free PEB for
			 * writing the fastmap anchor to and then reuses the
			 * current fastmap anchor PEB. When this PEB gets erased
			 * and a power cut happens before it is written again we
			 * must make sure that the fastmap attach code doesn't
			 * find any outdated fastmap anchors, hence we erase the
			 * outdated fastmap anchor PEBs synchronously here.
			 */
			if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
				sync = true;

			err = erase_aeb(ubi, aeb, sync);
			if (err)
				goto out_free;
		}

		found_pebs++;
	}

	dbg_wl("found %i PEBs", found_pebs);

	ubi_assert(ubi->good_peb_count == found_pebs);

	reserved_pebs = WL_RESERVED_PEBS;
	ubi_fastmap_init(ubi, &reserved_pebs);

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	if (!ubi->ro_mode && !ubi->fm_disabled)
		ubi_ensure_anchor_pebs(ubi);
#endif
	return 0;

out_free:
	shutdown_work(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			wl_entry_destroy(ubi, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	ubi_fastmap_close(ubi);
	shutdown_work(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->erroneous);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
}

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, and a negative error code if not or if an error
 * occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_pq(ubi, e))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}
#ifndef CONFIG_MTD_UBI_FASTMAP
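/**
 * get_peb_for_wl - get a free PEB for wear-leveling (non-fastmap case).
 * @ubi: UBI device description object
 *
 * Picks a free physical eraseblock whose erase counter is within
 * %WL_FREE_MAX_DIFF of the lowest one, removes it from the @ubi->free tree
 * and returns it. The caller is expected to hold @ubi->wl_lock.
 */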
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	ubi->free_count--;
	ubi_assert(ubi->free_count >= 0);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err(ubi, "no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0) {
			spin_unlock(&ubi->wl_lock);
			return err;
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		goto retry;
	}
	e = wl_get_wle(ubi);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
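
/*
 * Example (a sketch): ubi_wl_get_peb() returns with @ubi->fm_eba_sem held in
 * read mode, so callers are expected to drop it themselves, e.g.:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... use pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */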
#else
#include "fastmap-wl.c"
#endif