/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "got" via the 'ubi_wl_get_peb()' function, and free
 * physical eraseblocks are those that were put via the 'ubi_wl_put_peb()'
 * function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counters to free physical eraseblocks
 * with high erase counters.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As mentioned above, for the WL sub-system all physical eraseblocks are
 * either "free" or "used". Free eraseblocks are kept in the @wl->free
 * RB-tree, while used eraseblocks are kept in the @wl->used, @wl->erroneous,
 * or @wl->scrub RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put at the tail of this queue when
 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from
 * the head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines for how many (global) erase cycles PEBs are
 * protected.
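 *
 * As an illustration (the actual constant lives in ubi.h): with a protection
 * queue length of, say, 10, a PEB handed out by 'ubi_wl_get_peb()' stays
 * protected until roughly 10 further erase operations have happened anywhere
 * on the UBI device, and only then becomes a wear-leveling candidate again.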
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick
 * the target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */

#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#else
#include <ubi_uboot.h>
#endif

#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
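
/*
 * A worked example, assuming the common default CONFIG_MTD_UBI_WL_THRESHOLD
 * of 4096: if the smallest erase counter among free PEBs is 100, free PEBs
 * with an erase counter above 100 + 2*4096 = 8292 are not picked as
 * wear-leveling targets.
 */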

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the mutex in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
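	/*
	 * @ubi->pq is a circular buffer of %UBI_PROT_QUEUE_LEN list heads:
	 * @ubi->pq_head is the slot which is served (and then advanced) on
	 * the next erase operation, so the slot right behind it is the tail,
	 * i.e. the one that will be served last.
	 */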
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

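	/*
	 * Walk down the RB-tree, which is sorted by erase counter: whenever
	 * the current node's EC is below @max, remember it and descend right
	 * looking for a bigger one, otherwise descend left. The loop thus
	 * ends on the entry with the largest EC still below @max.
	 */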
	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
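		/*
		 * All free PEBs have similar erase counters, so the tree's
		 * root node, which sits roughly in the middle of the sorted
		 * order, is a reasonable "medium" pick.
		 */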
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

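	/*
	 * On success, 'ubi_io_sync_erase()' returns the number of erase
	 * operations it performed (more than one if the PEB was tortured),
	 * so the erase counter grows by that amount.
	 */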
	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
		       e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

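	/*
	 * The slot just served becomes the new tail; advancing the head with
	 * wrap-around keeps the queue cycling through its
	 * %UBI_PROT_QUEUE_LEN slots.
	 */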
	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

#ifdef __UBOOT__
void ubi_do_worker(struct ubi_device *ubi)
{
	int err;

	if (list_empty(&ubi->works) || ubi->ro_mode ||
	    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
		return;

	spin_lock(&ubi->wl_lock);
	while (!list_empty(&ubi->works)) {
		/*
		 * Call do_work, which executes exactly one work item from the
		 * queue, including removing it from the work queue.
		 */
		spin_unlock(&ubi->wl_lock);
		err = do_work(ubi);
		spin_lock(&ubi->wl_lock);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
		}
	}
	spin_unlock(&ubi->wl_lock);
}
#endif

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
#ifndef __UBOOT__
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
#endif
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);

#ifdef __UBOOT__
	ubi_do_worker(ubi);
#endif
	return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	return erase_worker(ubi, wl_wrk, 0);
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

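	/*
	 * Everything needed from the work object has been read above (under
	 * CONFIG_MTD_UBI_FASTMAP, the @anchor flag), so it can be freed
	 * before the actual work is done.
	 */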
	kfree(wrk);
	if (shutdown)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_avalible(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given out, so the user has probably not yet had
			 * a chance to write it, because it was preempted.
			 * So add this PEB to the protection queue for now,
			 * because presumably more data will be written there
			 * (including the missing VID header), and then we'll
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved; it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
	if (err)
		goto out_ro;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

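	/*
	 * This is a plain wear-leveling pass; anchor-PEB production is
	 * requested separately by the fastmap code, which sets @anchor.
	 */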
	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
#ifndef __UBOOT__
	else
		schedule_ubi_work(ubi, wrk);
#else
	else {
		schedule_ubi_work(ubi, wrk);
		ubi_do_worker(ubi);
	}
#endif
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL sub-system is shutting down
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	if (shutdown) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of the
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

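	/*
	 * These errors look transient (interrupted, out of memory, device
	 * busy), so the erasure is worth retrying rather than giving up.
	 */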
	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	wl_entry_destroy(ubi, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun spin_lock(&ubi->volumes_lock);
1134*4882a593Smuzhiyun if (ubi->beb_rsvd_pebs == 0) {
1135*4882a593Smuzhiyun if (ubi->avail_pebs == 0) {
1136*4882a593Smuzhiyun spin_unlock(&ubi->volumes_lock);
1137*4882a593Smuzhiyun ubi_err(ubi, "no reserved/available physical eraseblocks");
1138*4882a593Smuzhiyun goto out_ro;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun ubi->avail_pebs -= 1;
1141*4882a593Smuzhiyun available_consumed = 1;
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun spin_unlock(&ubi->volumes_lock);
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun ubi_msg(ubi, "mark PEB %d as bad", pnum);
1146*4882a593Smuzhiyun err = ubi_io_mark_bad(ubi, pnum);
1147*4882a593Smuzhiyun if (err)
1148*4882a593Smuzhiyun goto out_ro;
1149*4882a593Smuzhiyun
	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The number of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg(ubi, "%d PEBs left in the reserve",
			ubi->beb_rsvd_pebs);
	else
		ubi_warn(ubi, "last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}
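
/*
 * A sketch of how this worker is reached (explanatory note, not literal
 * driver code): schedule_erase() wraps a wear-leveling entry in a
 * struct ubi_work whose ->func points at erase_worker(). The background
 * thread and 'ubi_wl_flush()' later invoke wrk->func(ubi, wrk, 0), while
 * 'shutdown_work()' invokes it with the last argument set to 1, so the
 * work only releases its resources instead of erasing.
 */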

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it therefore has to be tested. This function
 * returns zero in case of success, and a negative error code in case of
 * failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	down_read(&ubi->fm_protect);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target of data moving. This may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not yet put the PEB to the "used"
		 * tree and is about to do so. We just set a flag which tells
		 * the WL worker that the PEB is not needed anymore and should
		 * be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_protect);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err(ubi, "PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				up_read(&ubi->fm_protect);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	up_read(&ubi->fm_protect);
	return err;
}
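
/*
 * Illustrative call pattern (a sketch, not code taken from this driver):
 * the EBA sub-system is the typical caller, returning a PEB once the
 * corresponding LEB has been unmapped:
 *
 *	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 *	if (err)
 *		return err;
 *
 * On failure the entry is put back into the "used" tree, so no PEB is
 * leaked.
 */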

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing, which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
	    in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was the target of data moving. The
		 * data was moved, but the PEB was not yet inserted into the
		 * proper tree. We should just wait a little and let the WL
		 * worker proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err(ubi, "PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi, 0);
}
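
/*
 * Illustrative call pattern (sketch): a read path that sees correctable
 * bit-flips, e.g. 'ubi_io_read()' returning %UBI_IO_BITFLIPS, would
 * typically request scrubbing along these lines:
 *
 *	if (err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */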

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err = 0;
	int found = 1;

	/*
	 * Keep executing works from the pending queue that match the given
	 * volume ID / LEB number until no matching work is left.
	 */
	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
	       vol_id, lnum, ubi->works_count);

	while (found) {
		struct ubi_work *wrk, *tmp;
		found = 0;

		down_read(&ubi->work_sem);
		spin_lock(&ubi->wl_lock);
		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
				list_del(&wrk->list);
				ubi->works_count -= 1;
				ubi_assert(ubi->works_count >= 0);
				spin_unlock(&ubi->wl_lock);

				err = wrk->func(ubi, wrk, 0);
				if (err) {
					up_read(&ubi->work_sem);
					return err;
				}

				spin_lock(&ubi->wl_lock);
				found = 1;
				break;
			}
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	return err;
}
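
/*
 * Illustrative call patterns (sketch; error handling omitted):
 *
 *	err = ubi_wl_flush(ubi, vol_id, lnum);       - flush one LEB's works
 *	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);   - flush everything
 */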

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

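	/*
	 * Non-recursive post-order walk: descend to a leaf, detach it from
	 * its parent, free it, and continue from the parent.
	 */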
	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			wl_entry_destroy(ubi, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg(ubi, "%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
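
/*
 * A sketch of how this thread is typically started during attach (the real
 * call site lives outside this file, so treat the exact form as an
 * assumption):
 *
 *	ubi->bgt_thread = kthread_run(ubi_thread, ubi, "%s", ubi->bgt_name);
 */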

/**
 * shutdown_work - shutdown all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
#ifdef CONFIG_MTD_UBI_FASTMAP
#ifndef __UBOOT__
	flush_work(&ubi->fm_work);
#else
	/* In U-Boot all work is done synchronously, so there is nothing to flush */
#endif
#endif
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i, reserved_pebs, found_pebs = 0;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	ubi->free_count = 0;
	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
			wl_entry_destroy(ubi, e);
			goto out_free;
		}

		found_pebs++;
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);

		wl_tree_add(e, &ubi->free);
		ubi->free_count++;

		ubi->lookuptbl[e->pnum] = e;

		found_pebs++;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;

			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}

			found_pebs++;
		}
	}

	dbg_wl("found %i PEBs", found_pebs);

	if (ubi->fm) {
		ubi_assert(ubi->good_peb_count ==
			   found_pebs + ubi->fm->used_blocks);

		for (i = 0; i < ubi->fm->used_blocks; i++) {
			e = ubi->fm->e[i];
			ubi->lookuptbl[e->pnum] = e;
		}
	} else {
		ubi_assert(ubi->good_peb_count == found_pebs);
	}

	reserved_pebs = WL_RESERVED_PEBS;
	ubi_fastmap_init(ubi, &reserved_pebs);

	if (ubi->avail_pebs < reserved_pebs) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, reserved_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		goto out_free;
	}
	ubi->avail_pebs -= reserved_pebs;
	ubi->rsvd_pebs += reserved_pebs;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi, 0);
	if (err)
		goto out_free;

	return 0;

out_free:
	shutdown_work(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}
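
/*
 * Illustrative attach-time usage (sketch; the real sequencing is handled by
 * the attach code outside this file):
 *
 *	err = ubi_wl_init(ubi, ai);
 *	if (err)
 *		return err;
 *
 * On failure it tears down its own state (pending works, RB-trees and the
 * lookup table) before returning.
 */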

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			wl_entry_destroy(ubi, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	ubi_fastmap_close(ubi);
	shutdown_work(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(ubi, &ubi->used);
	tree_destroy(ubi, &ubi->erroneous);
	tree_destroy(ubi, &ubi->free);
	tree_destroy(ubi, &ubi->scrub);
	kfree(ubi->lookuptbl);
}

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, and a non-zero value if not or if an error occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec && read_ec - ec > 1) {
		ubi_err(ubi, "self-check failed for PEB %d", pnum);
		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}
#ifndef CONFIG_MTD_UBI_FASTMAP
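/**
 * get_peb_for_wl - pick a free physical eraseblock for wear-leveling.
 * @ubi: UBI device description object
 *
 * Picks a suitable entry from the @ubi->free tree, removes it from the tree,
 * and returns it. Assumed to be called with @ubi->wl_lock held.
 */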
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	ubi->free_count--;
	ubi_assert(ubi->free_count >= 0);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

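	/*
	 * Assumed locking contract: entered with @ubi->wl_lock held; the lock
	 * is dropped around do_work() and re-taken before returning.
	 */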
	while (!ubi->free.rb_node && ubi->works_count) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err(ubi, "no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0) {
			spin_unlock(&ubi->wl_lock);
			return err;
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		goto retry;
	}

	e = wl_get_wle(ubi);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
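
/*
 * Illustrative call pattern (sketch): this function returns with
 * @ubi->fm_eba_sem held in read mode on every path, so the caller is
 * responsible for releasing it:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 */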
#else
#include "fastmap-wl.c"
#endif