xref: /rk3399_rockchip-uboot/drivers/mtd/ubi/wl.c (revision c91a719daa331b5856109313371e4ece5ec06d96)
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling unit.
 *
 * This unit is responsible for wear-leveling. It works in terms of physical
 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
 * volumes, etc. From this unit's perspective all physical eraseblocks are of
 * two types - used and free. Used physical eraseblocks are those that were
 * obtained ("get") by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only 0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL unit by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL unit.
 *
 * Wear-leveling is ensured by moving the contents of used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL unit may pick a free physical eraseblock with a low erase counter,
 * and so forth.
 *
 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
 *
 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
 * physical eraseblock, it has to be moved. Technically this is the same as
 * moving it for wear-leveling reasons.
 *
 * As was said, for the WL unit all physical eraseblocks are either "free" or
 * "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
 * eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this unit and make it more scalable.
 *
 * At the moment this unit does not utilize the sequence number, which was
 * introduced relatively recently. But it would be wise to do so because the
 * sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter and we need to pick
 * the target PEB, we could pick a PEB with the highest EC if our PEB is "old"
 * and pick a target PEB with an average EC if our PEB is not very "old". This
 * is room for future re-work of the WL unit.
 *
 * FIXME: looks too complex, should be simplified (later).
 */

#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION  10
#define LT_PROTECTION 4
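
/*
 * Example: the protection period is measured in erase operations on the
 * whole device, not in wall-clock time. If @ubi->abs_ec is 1000 when a
 * short-term PEB is handed out, 'prot_tree_add()' records the value
 * 1000 + ST_PROTECTION = 1016, and the PEB becomes movable again once the
 * absolute erase counter reaches 1016 (see 'check_protection_over()').
 */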

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks
 * with low erase counters to free physical eraseblocks with high erase
 * counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL unit does
 * not pick eraseblocks with an erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
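
/*
 * A worked example, assuming CONFIG_MTD_UBI_WL_THRESHOLD is set to 4096 (a
 * commonly used default): WL_FREE_MAX_DIFF is then 8192, so if the least
 * worn free PEB has EC 100, no free PEB with EC >= 8292 will be picked as a
 * wear-leveling target.
 */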

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait; this is
 *    especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
 * value, the physical eraseblocks are moved from the protection trees
 * (@wl->prot.*) to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split into several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 * @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 *
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

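		/*
		 * Order primarily by erase counter; ties are broken by the
		 * PEB number, which is unique, so no two keys ever compare
		 * equal and every entry gets a distinct slot in the tree.
		 */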
		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so several of them may be doing works at a time.
	 * But the queue flush code has to be sure the whole queue of works is
	 * done, and it takes @ubi->work_sem in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add physical eraseblock to protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: how long to protect the eraseblock, in erase operations; the
 * protection ends when the absolute erase counter reaches the current
 * @ubi->abs_ec plus this value.
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest possible erase counter, expressed as an offset from the
 * lowest erase counter in @root
 *
 * This function looks for a wear-leveling entry with an erase counter closest
 * to, but less than, the resulting maximum.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
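	/*
	 * @max arrives as an offset; rebase it on the lowest erase counter in
	 * the tree (the leftmost node) to get the absolute upper bound.
	 */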
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
		case UBI_LONGTERM:
			/*
			 * For long term data we pick a physical eraseblock
			 * with a high erase counter. But the highest erase
			 * counter we can pick is bounded by the lowest erase
			 * counter plus %WL_FREE_MAX_DIFF.
			 */
			e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
			protect = LT_PROTECTION;
			break;
		case UBI_UNKNOWN:
			/*
			 * For unknown data we pick a physical eraseblock with
			 * a medium erase counter. But we by no means can pick
			 * a physical eraseblock with an erase counter greater
			 * than or equal to the lowest erase counter plus
			 * %WL_FREE_MAX_DIFF.
			 */
			first = rb_entry(rb_first(&ubi->free),
					 struct ubi_wl_entry, rb);
			last = rb_entry(rb_last(&ubi->free),
					struct ubi_wl_entry, rb);

			if (last->ec - first->ec < WL_FREE_MAX_DIFF)
				e = rb_entry(ubi->free.rb_node,
						struct ubi_wl_entry, rb);
			else {
				medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
				e = find_wl_entry(&ubi->free, medium_ec);
			}
			protect = U_PROTECTION;
			break;
		case UBI_SHORTTERM:
			/*
			 * For short term data we pick a physical eraseblock
			 * with the lowest erase counter as we expect it will
			 * be erased soon.
			 */
			e = rb_entry(rb_first(&ubi->free),
				     struct ubi_wl_entry, rb);
			protect = ST_PROTECTION;
			break;
		default:
			protect = 0;
			e = NULL;
			BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}
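
/*
 * A minimal usage sketch for the function above (hypothetical caller; the
 * real call sites live in the EBA unit): the returned PEB already carries a
 * valid EC header, so the caller only needs to write the VID header and data
 * before eventually returning the block with 'ubi_wl_put_peb()'.
 *
 *	int pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *
 *	if (pnum < 0)
 *		return pnum;	// -ENOSPC, -ENOMEM, ...
 *	// ... write the VID header and data to PEB 'pnum' ...
 */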

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {
		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			goto found;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

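	/*
	 * On success 'ubi_io_sync_erase()' returns the number of erase
	 * operations it performed (more than one when torturing), so account
	 * for all of them in the erase counter.
	 */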
	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some
 * physical eraseblocks.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks no longer
 * have to be protected. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
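
/*
 * A sketch of how a new work type could be added, following the pattern
 * above ('example_worker' is a hypothetical function, not part of UBI):
 * allocate a 'struct ubi_work', point @func at the worker and queue it with
 * 'schedule_ubi_work()'; either the background thread or a synchronous
 * 'do_work()' call will then run it.
 *
 *	static int example_worker(struct ubi_device *ubi,
 *				  struct ubi_work *wrk, int cancel)
 *	{
 *		kfree(wrk);	// the worker owns and frees the work object
 *		return 0;	// zero on success, negative error otherwise
 *	}
 */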

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function moves the contents of a less worn out used physical
 * eraseblock to a more worn out free one. Returns zero in case of success
 * and a negative error code in case of failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ by a large enough margin, start
		 * wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes the VID header shortly after the
			 * PEB was given out, so we have a situation when it
			 * did not have a chance to write it yet because it
			 * was preempted. Just re-schedule the work, so that
			 * next time it will likely have the VID header in
			 * place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err < 0)
			goto out_error;
		if (err == 1)
			goto out_not_moved;

		/*
		 * For some reason the LEB was not moved - it might be because
		 * the volume is being deleted. We should prevent this PEB from
		 * being selected for wear-leveling movement for some "time",
		 * so put it into the protection tree.
		 */

		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		protect = 1;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_tree_add(ubi, e1, pe, protect);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	if (!protect) {
		err = schedule_erase(ubi, e1, 0);
		if (err)
			goto out_error;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
958*c91a719dSKyungmin Park 
959*c91a719dSKyungmin Park /**
960*c91a719dSKyungmin Park  * ensure_wear_leveling - schedule wear-leveling if it is needed.
961*c91a719dSKyungmin Park  * @ubi: UBI device description object
962*c91a719dSKyungmin Park  *
963*c91a719dSKyungmin Park  * This function checks if it is time to start wear-leveling and schedules it
964*c91a719dSKyungmin Park  * if yes. This function returns zero in case of success and a negative error
965*c91a719dSKyungmin Park  * code in case of failure.
966*c91a719dSKyungmin Park  */
967*c91a719dSKyungmin Park static int ensure_wear_leveling(struct ubi_device *ubi)
968*c91a719dSKyungmin Park {
969*c91a719dSKyungmin Park 	int err = 0;
970*c91a719dSKyungmin Park 	struct ubi_wl_entry *e1;
971*c91a719dSKyungmin Park 	struct ubi_wl_entry *e2;
972*c91a719dSKyungmin Park 	struct ubi_work *wrk;
973*c91a719dSKyungmin Park 
974*c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
975*c91a719dSKyungmin Park 	if (ubi->wl_scheduled)
976*c91a719dSKyungmin Park 		/* Wear-leveling is already in the work queue */
977*c91a719dSKyungmin Park 		goto out_unlock;
978*c91a719dSKyungmin Park 
979*c91a719dSKyungmin Park 	/*
980*c91a719dSKyungmin Park 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
981*c91a719dSKyungmin Park 	 * the WL worker has to be scheduled anyway.
982*c91a719dSKyungmin Park 	 */
983*c91a719dSKyungmin Park 	if (!ubi->scrub.rb_node) {
984*c91a719dSKyungmin Park 		if (!ubi->used.rb_node || !ubi->free.rb_node)
985*c91a719dSKyungmin Park 			/* No physical eraseblocks - no deal */
986*c91a719dSKyungmin Park 			goto out_unlock;
987*c91a719dSKyungmin Park 
988*c91a719dSKyungmin Park 		/*
989*c91a719dSKyungmin Park 		 * We schedule wear-leveling only if the difference between the
990*c91a719dSKyungmin Park 		 * lowest erase counter of used physical eraseblocks and a high
991*c91a719dSKyungmin Park 		 * erase counter of free physical eraseblocks is greater then
992*c91a719dSKyungmin Park 		 * %UBI_WL_THRESHOLD.
993*c91a719dSKyungmin Park 		 */
994*c91a719dSKyungmin Park 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
995*c91a719dSKyungmin Park 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
996*c91a719dSKyungmin Park 
997*c91a719dSKyungmin Park 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
998*c91a719dSKyungmin Park 			goto out_unlock;
999*c91a719dSKyungmin Park 		dbg_wl("schedule wear-leveling");
1000*c91a719dSKyungmin Park 	} else
1001*c91a719dSKyungmin Park 		dbg_wl("schedule scrubbing");
1002*c91a719dSKyungmin Park 
1003*c91a719dSKyungmin Park 	ubi->wl_scheduled = 1;
1004*c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1005*c91a719dSKyungmin Park 
1006*c91a719dSKyungmin Park 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1007*c91a719dSKyungmin Park 	if (!wrk) {
1008*c91a719dSKyungmin Park 		err = -ENOMEM;
1009*c91a719dSKyungmin Park 		goto out_cancel;
1010*c91a719dSKyungmin Park 	}
1011*c91a719dSKyungmin Park 
1012*c91a719dSKyungmin Park 	wrk->func = &wear_leveling_worker;
1013*c91a719dSKyungmin Park 	schedule_ubi_work(ubi, wrk);
1014*c91a719dSKyungmin Park 	return err;
1015*c91a719dSKyungmin Park 
1016*c91a719dSKyungmin Park out_cancel:
1017*c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1018*c91a719dSKyungmin Park 	ubi->wl_scheduled = 0;
1019*c91a719dSKyungmin Park out_unlock:
1020*c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1021*c91a719dSKyungmin Park 	return err;
1022*c91a719dSKyungmin Park }
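
/*
 * Illustrative note (not part of the original driver): callers invoke
 * 'ensure_wear_leveling()' whenever the contents of the trees may have
 * changed, so the threshold check above gets re-evaluated. A minimal
 * sketch of the pattern, as used by 'erase_worker()' below:
 *
 *	spin_lock(&ubi->wl_lock);
 *	wl_tree_add(e, &ubi->free);
 *	spin_unlock(&ubi->wl_lock);
 *	err = ensure_wear_leveling(ubi);
 */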
1023*c91a719dSKyungmin Park 
1024*c91a719dSKyungmin Park /**
1025*c91a719dSKyungmin Park  * erase_worker - physical eraseblock erase worker function.
1026*c91a719dSKyungmin Park  * @ubi: UBI device description object
1027*c91a719dSKyungmin Park  * @wl_wrk: the work object
1028*c91a719dSKyungmin Park  * @cancel: non-zero if the worker has to free memory and exit
1029*c91a719dSKyungmin Park  *
1030*c91a719dSKyungmin Park  * This function erases a physical eraseblock and performs torture testing if
1031*c91a719dSKyungmin Park  * needed. It also takes care of marking the physical eraseblock bad if
1032*c91a719dSKyungmin Park  * needed. Returns zero in case of success and a negative error code in case of
1033*c91a719dSKyungmin Park  * failure.
1034*c91a719dSKyungmin Park  */
1035*c91a719dSKyungmin Park static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1036*c91a719dSKyungmin Park 			int cancel)
1037*c91a719dSKyungmin Park {
1038*c91a719dSKyungmin Park 	struct ubi_wl_entry *e = wl_wrk->e;
1039*c91a719dSKyungmin Park 	int pnum = e->pnum, err, need;
1040*c91a719dSKyungmin Park 
1041*c91a719dSKyungmin Park 	if (cancel) {
1042*c91a719dSKyungmin Park 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1043*c91a719dSKyungmin Park 		kfree(wl_wrk);
1044*c91a719dSKyungmin Park 		kmem_cache_free(ubi_wl_entry_slab, e);
1045*c91a719dSKyungmin Park 		return 0;
1046*c91a719dSKyungmin Park 	}
1047*c91a719dSKyungmin Park 
1048*c91a719dSKyungmin Park 	dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1049*c91a719dSKyungmin Park 
1050*c91a719dSKyungmin Park 	err = sync_erase(ubi, e, wl_wrk->torture);
1051*c91a719dSKyungmin Park 	if (!err) {
1052*c91a719dSKyungmin Park 		/* Fine, we've erased it successfully */
1053*c91a719dSKyungmin Park 		kfree(wl_wrk);
1054*c91a719dSKyungmin Park 
1055*c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1056*c91a719dSKyungmin Park 		ubi->abs_ec += 1;
1057*c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->free);
1058*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1059*c91a719dSKyungmin Park 
1060*c91a719dSKyungmin Park 		/*
1061*c91a719dSKyungmin Park 		 * One more erase operation has happened, take care of protected
1062*c91a719dSKyungmin Park 		 * physical eraseblocks.
1063*c91a719dSKyungmin Park 		 */
1064*c91a719dSKyungmin Park 		check_protection_over(ubi);
1065*c91a719dSKyungmin Park 
1066*c91a719dSKyungmin Park 		/* And take care of wear-leveling */
1067*c91a719dSKyungmin Park 		err = ensure_wear_leveling(ubi);
1068*c91a719dSKyungmin Park 		return err;
1069*c91a719dSKyungmin Park 	}
1070*c91a719dSKyungmin Park 
1071*c91a719dSKyungmin Park 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
1072*c91a719dSKyungmin Park 	kfree(wl_wrk);
1073*c91a719dSKyungmin Park 	kmem_cache_free(ubi_wl_entry_slab, e);
1074*c91a719dSKyungmin Park 
1075*c91a719dSKyungmin Park 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1076*c91a719dSKyungmin Park 	    err == -EBUSY) {
1077*c91a719dSKyungmin Park 		int err1;
1078*c91a719dSKyungmin Park 
1079*c91a719dSKyungmin Park 		/* Re-schedule the PEB for erasure */
1080*c91a719dSKyungmin Park 		err1 = schedule_erase(ubi, e, 0);
1081*c91a719dSKyungmin Park 		if (err1) {
1082*c91a719dSKyungmin Park 			err = err1;
1083*c91a719dSKyungmin Park 			goto out_ro;
1084*c91a719dSKyungmin Park 		}
1085*c91a719dSKyungmin Park 		return err;
1086*c91a719dSKyungmin Park 	} else if (err != -EIO) {
1087*c91a719dSKyungmin Park 		/*
1088*c91a719dSKyungmin Park 		 * If this is not %-EIO, we have no idea what to do. Scheduling
1089*c91a719dSKyungmin Park 		 * this physical eraseblock for erasure again would cause
1090*c91a719dSKyungmin Park 		 * errors again and again. Well, let's switch to RO mode.
1091*c91a719dSKyungmin Park 		 */
1092*c91a719dSKyungmin Park 		goto out_ro;
1093*c91a719dSKyungmin Park 	}
1094*c91a719dSKyungmin Park 
1095*c91a719dSKyungmin Park 	/* It is %-EIO, the PEB went bad */
1096*c91a719dSKyungmin Park 
1097*c91a719dSKyungmin Park 	if (!ubi->bad_allowed) {
1098*c91a719dSKyungmin Park 		ubi_err("bad physical eraseblock %d detected", pnum);
1099*c91a719dSKyungmin Park 		goto out_ro;
1100*c91a719dSKyungmin Park 	}
1101*c91a719dSKyungmin Park 
1102*c91a719dSKyungmin Park 	spin_lock(&ubi->volumes_lock);
1103*c91a719dSKyungmin Park 	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1104*c91a719dSKyungmin Park 	if (need > 0) {
1105*c91a719dSKyungmin Park 		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1106*c91a719dSKyungmin Park 		ubi->avail_pebs -= need;
1107*c91a719dSKyungmin Park 		ubi->rsvd_pebs += need;
1108*c91a719dSKyungmin Park 		ubi->beb_rsvd_pebs += need;
1109*c91a719dSKyungmin Park 		if (need > 0)
1110*c91a719dSKyungmin Park 			ubi_msg("reserved %d more PEBs", need);
1111*c91a719dSKyungmin Park 	}
1112*c91a719dSKyungmin Park 
1113*c91a719dSKyungmin Park 	if (ubi->beb_rsvd_pebs == 0) {
1114*c91a719dSKyungmin Park 		spin_unlock(&ubi->volumes_lock);
1115*c91a719dSKyungmin Park 		ubi_err("no reserved physical eraseblocks");
1116*c91a719dSKyungmin Park 		goto out_ro;
1117*c91a719dSKyungmin Park 	}
1118*c91a719dSKyungmin Park 
1119*c91a719dSKyungmin Park 	spin_unlock(&ubi->volumes_lock);
1120*c91a719dSKyungmin Park 	ubi_msg("mark PEB %d as bad", pnum);
1121*c91a719dSKyungmin Park 
1122*c91a719dSKyungmin Park 	err = ubi_io_mark_bad(ubi, pnum);
1123*c91a719dSKyungmin Park 	if (err)
1124*c91a719dSKyungmin Park 		goto out_ro;
1125*c91a719dSKyungmin Park 
1126*c91a719dSKyungmin Park 	spin_lock(&ubi->volumes_lock);
1127*c91a719dSKyungmin Park 	ubi->beb_rsvd_pebs -= 1;
1128*c91a719dSKyungmin Park 	ubi->bad_peb_count += 1;
1129*c91a719dSKyungmin Park 	ubi->good_peb_count -= 1;
1130*c91a719dSKyungmin Park 	ubi_calculate_reserved(ubi);
1131*c91a719dSKyungmin Park 	if (ubi->beb_rsvd_pebs == 0)
1132*c91a719dSKyungmin Park 		ubi_warn("last PEB from the reserved pool was used");
1133*c91a719dSKyungmin Park 	spin_unlock(&ubi->volumes_lock);
1134*c91a719dSKyungmin Park 
1135*c91a719dSKyungmin Park 	return err;
1136*c91a719dSKyungmin Park 
1137*c91a719dSKyungmin Park out_ro:
1138*c91a719dSKyungmin Park 	ubi_ro_mode(ubi);
1139*c91a719dSKyungmin Park 	return err;
1140*c91a719dSKyungmin Park }
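
/*
 * A hedged sketch (not code copied from this file) of how an erase work
 * reaches 'erase_worker()': 'schedule_erase()', defined earlier in this
 * file, roughly fills a 'struct ubi_work' and queues it, and the
 * background thread later invokes it via 'do_work()':
 *
 *	wl_wrk->func = &erase_worker;
 *	wl_wrk->e = e;
 *	wl_wrk->torture = torture;
 *	schedule_ubi_work(ubi, wl_wrk);
 */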
1141*c91a719dSKyungmin Park 
1142*c91a719dSKyungmin Park /**
1143*c91a719dSKyungmin Park  * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
1144*c91a719dSKyungmin Park  * @ubi: UBI device description object
1145*c91a719dSKyungmin Park  * @pnum: physical eraseblock to return
1146*c91a719dSKyungmin Park  * @torture: if this physical eraseblock has to be tortured
1147*c91a719dSKyungmin Park  *
1148*c91a719dSKyungmin Park  * This function is called to return physical eraseblock @pnum to the pool of
1149*c91a719dSKyungmin Park  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1150*c91a719dSKyungmin Park  * occurred on this @pnum and it has to be tested. This function returns zero
1151*c91a719dSKyungmin Park  * in case of success, and a negative error code in case of failure.
1152*c91a719dSKyungmin Park  */
1153*c91a719dSKyungmin Park int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1154*c91a719dSKyungmin Park {
1155*c91a719dSKyungmin Park 	int err;
1156*c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1157*c91a719dSKyungmin Park 
1158*c91a719dSKyungmin Park 	dbg_wl("PEB %d", pnum);
1159*c91a719dSKyungmin Park 	ubi_assert(pnum >= 0);
1160*c91a719dSKyungmin Park 	ubi_assert(pnum < ubi->peb_count);
1161*c91a719dSKyungmin Park 
1162*c91a719dSKyungmin Park retry:
1163*c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1164*c91a719dSKyungmin Park 	e = ubi->lookuptbl[pnum];
1165*c91a719dSKyungmin Park 	if (e == ubi->move_from) {
1166*c91a719dSKyungmin Park 		/*
1167*c91a719dSKyungmin Park 		 * User is putting the physical eraseblock which was selected to
1168*c91a719dSKyungmin Park 		 * be moved. It will be scheduled for erasure in the
1169*c91a719dSKyungmin Park 		 * wear-leveling worker.
1170*c91a719dSKyungmin Park 		 */
1171*c91a719dSKyungmin Park 		dbg_wl("PEB %d is being moved, wait", pnum);
1172*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1173*c91a719dSKyungmin Park 
1174*c91a719dSKyungmin Park 		/* Wait for the WL worker by taking the @ubi->move_mutex */
1175*c91a719dSKyungmin Park 		mutex_lock(&ubi->move_mutex);
1176*c91a719dSKyungmin Park 		mutex_unlock(&ubi->move_mutex);
1177*c91a719dSKyungmin Park 		goto retry;
1178*c91a719dSKyungmin Park 	} else if (e == ubi->move_to) {
1179*c91a719dSKyungmin Park 		/*
1180*c91a719dSKyungmin Park 		 * User is putting the physical eraseblock which was selected
1181*c91a719dSKyungmin Park 		 * as the target of data movement. This may happen if the EBA
1182*c91a719dSKyungmin Park 		 * unit has already re-mapped the LEB in 'ubi_eba_copy_leb()'
1183*c91a719dSKyungmin Park 		 * but the WL unit has not yet put the PEB to the "used" tree,
1184*c91a719dSKyungmin Park 		 * although it is about to do this. So we just set a flag which
1185*c91a719dSKyungmin Park 		 * will tell the WL worker that the PEB is not needed anymore
1186*c91a719dSKyungmin Park 		 * and should be scheduled for erasure.
1187*c91a719dSKyungmin Park 		 */
1188*c91a719dSKyungmin Park 		dbg_wl("PEB %d is the target of data moving", pnum);
1189*c91a719dSKyungmin Park 		ubi_assert(!ubi->move_to_put);
1190*c91a719dSKyungmin Park 		ubi->move_to_put = 1;
1191*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1192*c91a719dSKyungmin Park 		return 0;
1193*c91a719dSKyungmin Park 	} else {
1194*c91a719dSKyungmin Park 		if (in_wl_tree(e, &ubi->used)) {
1195*c91a719dSKyungmin Park 			paranoid_check_in_wl_tree(e, &ubi->used);
1196*c91a719dSKyungmin Park 			rb_erase(&e->rb, &ubi->used);
1197*c91a719dSKyungmin Park 		} else if (in_wl_tree(e, &ubi->scrub)) {
1198*c91a719dSKyungmin Park 			paranoid_check_in_wl_tree(e, &ubi->scrub);
1199*c91a719dSKyungmin Park 			rb_erase(&e->rb, &ubi->scrub);
1200*c91a719dSKyungmin Park 		} else {
1201*c91a719dSKyungmin Park 			err = prot_tree_del(ubi, e->pnum);
1202*c91a719dSKyungmin Park 			if (err) {
1203*c91a719dSKyungmin Park 				ubi_err("PEB %d not found", pnum);
1204*c91a719dSKyungmin Park 				ubi_ro_mode(ubi);
1205*c91a719dSKyungmin Park 				spin_unlock(&ubi->wl_lock);
1206*c91a719dSKyungmin Park 				return err;
1207*c91a719dSKyungmin Park 			}
1208*c91a719dSKyungmin Park 		}
1209*c91a719dSKyungmin Park 	}
1210*c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1211*c91a719dSKyungmin Park 
1212*c91a719dSKyungmin Park 	err = schedule_erase(ubi, e, torture);
1213*c91a719dSKyungmin Park 	if (err) {
1214*c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1215*c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->used);
1216*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1217*c91a719dSKyungmin Park 	}
1218*c91a719dSKyungmin Park 
1219*c91a719dSKyungmin Park 	return err;
1220*c91a719dSKyungmin Park }
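
/*
 * Usage sketch (illustrative): a caller that hit an I/O error on @pnum
 * returns it with the @torture flag set, so that 'erase_worker()' passes
 * 'wl_wrk->torture' on to 'sync_erase()':
 *
 *	err = ubi_wl_put_peb(ubi, pnum, 1);
 *	if (err)
 *		return err;
 */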
1221*c91a719dSKyungmin Park 
1222*c91a719dSKyungmin Park /**
1223*c91a719dSKyungmin Park  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1224*c91a719dSKyungmin Park  * @ubi: UBI device description object
1225*c91a719dSKyungmin Park  * @pnum: the physical eraseblock to schedule
1226*c91a719dSKyungmin Park  *
1227*c91a719dSKyungmin Park  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1228*c91a719dSKyungmin Park  * needs scrubbing. This function schedules a physical eraseblock for
1229*c91a719dSKyungmin Park  * scrubbing which is done in the background. This function returns zero in case of
1230*c91a719dSKyungmin Park  * success and a negative error code in case of failure.
1231*c91a719dSKyungmin Park  */
1232*c91a719dSKyungmin Park int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1233*c91a719dSKyungmin Park {
1234*c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1235*c91a719dSKyungmin Park 
1236*c91a719dSKyungmin Park 	ubi_msg("schedule PEB %d for scrubbing", pnum);
1237*c91a719dSKyungmin Park 
1238*c91a719dSKyungmin Park retry:
1239*c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1240*c91a719dSKyungmin Park 	e = ubi->lookuptbl[pnum];
1241*c91a719dSKyungmin Park 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1242*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1243*c91a719dSKyungmin Park 		return 0;
1244*c91a719dSKyungmin Park 	}
1245*c91a719dSKyungmin Park 
1246*c91a719dSKyungmin Park 	if (e == ubi->move_to) {
1247*c91a719dSKyungmin Park 		/*
1248*c91a719dSKyungmin Park 		 * This physical eraseblock was used to move data to. The data
1249*c91a719dSKyungmin Park 		 * was moved but the PEB was not yet inserted into the proper
1250*c91a719dSKyungmin Park 		 * tree. We should just wait a little and let the WL worker
1251*c91a719dSKyungmin Park 		 * proceed.
1252*c91a719dSKyungmin Park 		 */
1253*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1254*c91a719dSKyungmin Park 		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1255*c91a719dSKyungmin Park 		yield();
1256*c91a719dSKyungmin Park 		goto retry;
1257*c91a719dSKyungmin Park 	}
1258*c91a719dSKyungmin Park 
1259*c91a719dSKyungmin Park 	if (in_wl_tree(e, &ubi->used)) {
1260*c91a719dSKyungmin Park 		paranoid_check_in_wl_tree(e, &ubi->used);
1261*c91a719dSKyungmin Park 		rb_erase(&e->rb, &ubi->used);
1262*c91a719dSKyungmin Park 	} else {
1263*c91a719dSKyungmin Park 		int err;
1264*c91a719dSKyungmin Park 
1265*c91a719dSKyungmin Park 		err = prot_tree_del(ubi, e->pnum);
1266*c91a719dSKyungmin Park 		if (err) {
1267*c91a719dSKyungmin Park 			ubi_err("PEB %d not found", pnum);
1268*c91a719dSKyungmin Park 			ubi_ro_mode(ubi);
1269*c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
1270*c91a719dSKyungmin Park 			return err;
1271*c91a719dSKyungmin Park 		}
1272*c91a719dSKyungmin Park 	}
1273*c91a719dSKyungmin Park 
1274*c91a719dSKyungmin Park 	wl_tree_add(e, &ubi->scrub);
1275*c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1276*c91a719dSKyungmin Park 
1277*c91a719dSKyungmin Park 	/*
1278*c91a719dSKyungmin Park 	 * Technically scrubbing is the same as wear-leveling, so it is done
1279*c91a719dSKyungmin Park 	 * by the WL worker.
1280*c91a719dSKyungmin Park 	 */
1281*c91a719dSKyungmin Park 	return ensure_wear_leveling(ubi);
1282*c91a719dSKyungmin Park }
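
/*
 * Usage sketch (illustrative): when a read reports correctable bit-flips,
 * e.g. 'ubi_io_read_ec_hdr()' returning %UBI_IO_BITFLIPS as in
 * 'paranoid_check_ec()' below, the PEB can be queued for scrubbing:
 *
 *	if (err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */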
1283*c91a719dSKyungmin Park 
1284*c91a719dSKyungmin Park /**
1285*c91a719dSKyungmin Park  * ubi_wl_flush - flush all pending works.
1286*c91a719dSKyungmin Park  * @ubi: UBI device description object
1287*c91a719dSKyungmin Park  *
1288*c91a719dSKyungmin Park  * This function returns zero in case of success and a negative error code in
1289*c91a719dSKyungmin Park  * case of failure.
1290*c91a719dSKyungmin Park  */
1291*c91a719dSKyungmin Park int ubi_wl_flush(struct ubi_device *ubi)
1292*c91a719dSKyungmin Park {
1293*c91a719dSKyungmin Park 	int err;
1294*c91a719dSKyungmin Park 
1295*c91a719dSKyungmin Park 	/*
1296*c91a719dSKyungmin Park 	 * Erase while the pending works queue is not empty, but not more than
1297*c91a719dSKyungmin Park 	 * the number of currently pending works.
1298*c91a719dSKyungmin Park 	 */
1299*c91a719dSKyungmin Park 	dbg_wl("flush (%d pending works)", ubi->works_count);
1300*c91a719dSKyungmin Park 	while (ubi->works_count) {
1301*c91a719dSKyungmin Park 		err = do_work(ubi);
1302*c91a719dSKyungmin Park 		if (err)
1303*c91a719dSKyungmin Park 			return err;
1304*c91a719dSKyungmin Park 	}
1305*c91a719dSKyungmin Park 
1306*c91a719dSKyungmin Park 	/*
1307*c91a719dSKyungmin Park 	 * Make sure all the works which are being done in parallel are
1308*c91a719dSKyungmin Park 	 * finished.
1309*c91a719dSKyungmin Park 	 */
1310*c91a719dSKyungmin Park 	down_write(&ubi->work_sem);
1311*c91a719dSKyungmin Park 	up_write(&ubi->work_sem);
1312*c91a719dSKyungmin Park 
1313*c91a719dSKyungmin Park 	/*
1314*c91a719dSKyungmin Park 	 * And in case the last one was the WL worker which cancelled the
1315*c91a719dSKyungmin Park 	 * LEB movement, flush again.
1316*c91a719dSKyungmin Park 	 */
1317*c91a719dSKyungmin Park 	while (ubi->works_count) {
1318*c91a719dSKyungmin Park 		dbg_wl("flush more (%d pending works)", ubi->works_count);
1319*c91a719dSKyungmin Park 		err = do_work(ubi);
1320*c91a719dSKyungmin Park 		if (err)
1321*c91a719dSKyungmin Park 			return err;
1322*c91a719dSKyungmin Park 	}
1323*c91a719dSKyungmin Park 
1324*c91a719dSKyungmin Park 	return 0;
1325*c91a719dSKyungmin Park }
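
/*
 * Illustrative note: each 'do_work()' call above processes one pending
 * work item, so a single pass runs at most 'ubi->works_count' iterations.
 * A caller that must not proceed before all scheduled erasures are done
 * would simply do:
 *
 *	err = ubi_wl_flush(ubi);
 *	if (err)
 *		return err;
 */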
1326*c91a719dSKyungmin Park 
1327*c91a719dSKyungmin Park /**
1328*c91a719dSKyungmin Park  * tree_destroy - destroy an RB-tree.
1329*c91a719dSKyungmin Park  * @root: the root of the tree to destroy
1330*c91a719dSKyungmin Park  */
1331*c91a719dSKyungmin Park static void tree_destroy(struct rb_root *root)
1332*c91a719dSKyungmin Park {
1333*c91a719dSKyungmin Park 	struct rb_node *rb;
1334*c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1335*c91a719dSKyungmin Park 
1336*c91a719dSKyungmin Park 	rb = root->rb_node;
1337*c91a719dSKyungmin Park 	while (rb) {
1338*c91a719dSKyungmin Park 		if (rb->rb_left)
1339*c91a719dSKyungmin Park 			rb = rb->rb_left;
1340*c91a719dSKyungmin Park 		else if (rb->rb_right)
1341*c91a719dSKyungmin Park 			rb = rb->rb_right;
1342*c91a719dSKyungmin Park 		else {
1343*c91a719dSKyungmin Park 			e = rb_entry(rb, struct ubi_wl_entry, rb);
1344*c91a719dSKyungmin Park 
1345*c91a719dSKyungmin Park 			rb = rb_parent(rb);
1346*c91a719dSKyungmin Park 			if (rb) {
1347*c91a719dSKyungmin Park 				if (rb->rb_left == &e->rb)
1348*c91a719dSKyungmin Park 					rb->rb_left = NULL;
1349*c91a719dSKyungmin Park 				else
1350*c91a719dSKyungmin Park 					rb->rb_right = NULL;
1351*c91a719dSKyungmin Park 			}
1352*c91a719dSKyungmin Park 
1353*c91a719dSKyungmin Park 			kmem_cache_free(ubi_wl_entry_slab, e);
1354*c91a719dSKyungmin Park 		}
1355*c91a719dSKyungmin Park 	}
1356*c91a719dSKyungmin Park }
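
/*
 * Note on 'tree_destroy()': the loop is a non-recursive post-order
 * traversal. It always descends to a leaf first, frees it, and clears the
 * parent's child pointer so the freed node is never visited again on the
 * way back up; a node is therefore freed only after both of its subtrees.
 */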
1357*c91a719dSKyungmin Park 
1358*c91a719dSKyungmin Park /**
1359*c91a719dSKyungmin Park  * ubi_thread - UBI background thread.
1360*c91a719dSKyungmin Park  * @u: the UBI device description object pointer
1361*c91a719dSKyungmin Park  */
1362*c91a719dSKyungmin Park int ubi_thread(void *u)
1363*c91a719dSKyungmin Park {
1364*c91a719dSKyungmin Park 	int failures = 0;
1365*c91a719dSKyungmin Park 	struct ubi_device *ubi = u;
1366*c91a719dSKyungmin Park 
1367*c91a719dSKyungmin Park 	ubi_msg("background thread \"%s\" started, PID %d",
1368*c91a719dSKyungmin Park 		ubi->bgt_name, task_pid_nr(current));
1369*c91a719dSKyungmin Park 
1370*c91a719dSKyungmin Park 	set_freezable();
1371*c91a719dSKyungmin Park 	for (;;) {
1372*c91a719dSKyungmin Park 		int err;
1373*c91a719dSKyungmin Park 
1374*c91a719dSKyungmin Park 		if (kthread_should_stop())
1375*c91a719dSKyungmin Park 			break;
1376*c91a719dSKyungmin Park 
1377*c91a719dSKyungmin Park 		if (try_to_freeze())
1378*c91a719dSKyungmin Park 			continue;
1379*c91a719dSKyungmin Park 
1380*c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1381*c91a719dSKyungmin Park 		if (list_empty(&ubi->works) || ubi->ro_mode ||
1382*c91a719dSKyungmin Park 			       !ubi->thread_enabled) {
1383*c91a719dSKyungmin Park 			set_current_state(TASK_INTERRUPTIBLE);
1384*c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
1385*c91a719dSKyungmin Park 			schedule();
1386*c91a719dSKyungmin Park 			continue;
1387*c91a719dSKyungmin Park 		}
1388*c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1389*c91a719dSKyungmin Park 
1390*c91a719dSKyungmin Park 		err = do_work(ubi);
1391*c91a719dSKyungmin Park 		if (err) {
1392*c91a719dSKyungmin Park 			ubi_err("%s: work failed with error code %d",
1393*c91a719dSKyungmin Park 				ubi->bgt_name, err);
1394*c91a719dSKyungmin Park 			if (failures++ > WL_MAX_FAILURES) {
1395*c91a719dSKyungmin Park 				/*
1396*c91a719dSKyungmin Park 				 * Too many failures, disable the thread and
1397*c91a719dSKyungmin Park 				 * switch to read-only mode.
1398*c91a719dSKyungmin Park 				 */
1399*c91a719dSKyungmin Park 				ubi_msg("%s: %d consecutive failures",
1400*c91a719dSKyungmin Park 					ubi->bgt_name, WL_MAX_FAILURES);
1401*c91a719dSKyungmin Park 				ubi_ro_mode(ubi);
1402*c91a719dSKyungmin Park 				break;
1403*c91a719dSKyungmin Park 			}
1404*c91a719dSKyungmin Park 		} else
1405*c91a719dSKyungmin Park 			failures = 0;
1406*c91a719dSKyungmin Park 
1407*c91a719dSKyungmin Park 		cond_resched();
1408*c91a719dSKyungmin Park 	}
1409*c91a719dSKyungmin Park 
1410*c91a719dSKyungmin Park 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1411*c91a719dSKyungmin Park 	return 0;
1412*c91a719dSKyungmin Park }
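
/*
 * A hedged sketch of how this thread is started and stopped (the call
 * sites live outside this file; the 'bgt_thread' field name is an
 * assumption here):
 *
 *	ubi->bgt_thread = kthread_run(ubi_thread, ubi, ubi->bgt_name);
 *	...
 *	kthread_stop(ubi->bgt_thread);
 *
 * 'kthread_stop()' is what makes 'kthread_should_stop()' above return
 * true and break the loop.
 */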
1413*c91a719dSKyungmin Park 
1414*c91a719dSKyungmin Park /**
1415*c91a719dSKyungmin Park  * cancel_pending - cancel all pending works.
1416*c91a719dSKyungmin Park  * @ubi: UBI device description object
1417*c91a719dSKyungmin Park  */
1418*c91a719dSKyungmin Park static void cancel_pending(struct ubi_device *ubi)
1419*c91a719dSKyungmin Park {
1420*c91a719dSKyungmin Park 	while (!list_empty(&ubi->works)) {
1421*c91a719dSKyungmin Park 		struct ubi_work *wrk;
1422*c91a719dSKyungmin Park 
1423*c91a719dSKyungmin Park 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
1424*c91a719dSKyungmin Park 		list_del(&wrk->list);
1425*c91a719dSKyungmin Park 		wrk->func(ubi, wrk, 1);
1426*c91a719dSKyungmin Park 		ubi->works_count -= 1;
1427*c91a719dSKyungmin Park 		ubi_assert(ubi->works_count >= 0);
1428*c91a719dSKyungmin Park 	}
1429*c91a719dSKyungmin Park }
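
/*
 * Note: calling 'wrk->func(ubi, wrk, 1)' above takes the @cancel path of
 * the worker function; for erase works this is the branch at the top of
 * 'erase_worker()', which only frees the work object and the WL entry.
 */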
1430*c91a719dSKyungmin Park 
1431*c91a719dSKyungmin Park /**
1432*c91a719dSKyungmin Park  * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
1433*c91a719dSKyungmin Park  * information.
1434*c91a719dSKyungmin Park  * @ubi: UBI device description object
1435*c91a719dSKyungmin Park  * @si: scanning information
1436*c91a719dSKyungmin Park  *
1437*c91a719dSKyungmin Park  * This function returns zero in case of success, and a negative error code in
1438*c91a719dSKyungmin Park  * case of failure.
1439*c91a719dSKyungmin Park  */
1440*c91a719dSKyungmin Park int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1441*c91a719dSKyungmin Park {
1442*c91a719dSKyungmin Park 	int err;
1443*c91a719dSKyungmin Park 	struct rb_node *rb1, *rb2;
1444*c91a719dSKyungmin Park 	struct ubi_scan_volume *sv;
1445*c91a719dSKyungmin Park 	struct ubi_scan_leb *seb, *tmp;
1446*c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1447*c91a719dSKyungmin Park 
1449*c91a719dSKyungmin Park 	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1450*c91a719dSKyungmin Park 	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1451*c91a719dSKyungmin Park 	spin_lock_init(&ubi->wl_lock);
1452*c91a719dSKyungmin Park 	mutex_init(&ubi->move_mutex);
1453*c91a719dSKyungmin Park 	init_rwsem(&ubi->work_sem);
1454*c91a719dSKyungmin Park 	ubi->max_ec = si->max_ec;
1455*c91a719dSKyungmin Park 	INIT_LIST_HEAD(&ubi->works);
1456*c91a719dSKyungmin Park 
1457*c91a719dSKyungmin Park 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1458*c91a719dSKyungmin Park 
1459*c91a719dSKyungmin Park 	err = -ENOMEM;
1460*c91a719dSKyungmin Park 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1461*c91a719dSKyungmin Park 	if (!ubi->lookuptbl)
1462*c91a719dSKyungmin Park 		return err;
1463*c91a719dSKyungmin Park 
1464*c91a719dSKyungmin Park 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1465*c91a719dSKyungmin Park 		cond_resched();
1466*c91a719dSKyungmin Park 
1467*c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1468*c91a719dSKyungmin Park 		if (!e)
1469*c91a719dSKyungmin Park 			goto out_free;
1470*c91a719dSKyungmin Park 
1471*c91a719dSKyungmin Park 		e->pnum = seb->pnum;
1472*c91a719dSKyungmin Park 		e->ec = seb->ec;
1473*c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1474*c91a719dSKyungmin Park 		if (schedule_erase(ubi, e, 0)) {
1475*c91a719dSKyungmin Park 			kmem_cache_free(ubi_wl_entry_slab, e);
1476*c91a719dSKyungmin Park 			goto out_free;
1477*c91a719dSKyungmin Park 		}
1478*c91a719dSKyungmin Park 	}
1479*c91a719dSKyungmin Park 
1480*c91a719dSKyungmin Park 	list_for_each_entry(seb, &si->free, u.list) {
1481*c91a719dSKyungmin Park 		cond_resched();
1482*c91a719dSKyungmin Park 
1483*c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1484*c91a719dSKyungmin Park 		if (!e)
1485*c91a719dSKyungmin Park 			goto out_free;
1486*c91a719dSKyungmin Park 
1487*c91a719dSKyungmin Park 		e->pnum = seb->pnum;
1488*c91a719dSKyungmin Park 		e->ec = seb->ec;
1489*c91a719dSKyungmin Park 		ubi_assert(e->ec >= 0);
1490*c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->free);
1491*c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1492*c91a719dSKyungmin Park 	}
1493*c91a719dSKyungmin Park 
1494*c91a719dSKyungmin Park 	list_for_each_entry(seb, &si->corr, u.list) {
1495*c91a719dSKyungmin Park 		cond_resched();
1496*c91a719dSKyungmin Park 
1497*c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1498*c91a719dSKyungmin Park 		if (!e)
1499*c91a719dSKyungmin Park 			goto out_free;
1500*c91a719dSKyungmin Park 
1501*c91a719dSKyungmin Park 		e->pnum = seb->pnum;
1502*c91a719dSKyungmin Park 		e->ec = seb->ec;
1503*c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1504*c91a719dSKyungmin Park 		if (schedule_erase(ubi, e, 0)) {
1505*c91a719dSKyungmin Park 			kmem_cache_free(ubi_wl_entry_slab, e);
1506*c91a719dSKyungmin Park 			goto out_free;
1507*c91a719dSKyungmin Park 		}
1508*c91a719dSKyungmin Park 	}
1509*c91a719dSKyungmin Park 
1510*c91a719dSKyungmin Park 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1511*c91a719dSKyungmin Park 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1512*c91a719dSKyungmin Park 			cond_resched();
1513*c91a719dSKyungmin Park 
1514*c91a719dSKyungmin Park 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1515*c91a719dSKyungmin Park 			if (!e)
1516*c91a719dSKyungmin Park 				goto out_free;
1517*c91a719dSKyungmin Park 
1518*c91a719dSKyungmin Park 			e->pnum = seb->pnum;
1519*c91a719dSKyungmin Park 			e->ec = seb->ec;
1520*c91a719dSKyungmin Park 			ubi->lookuptbl[e->pnum] = e;
1521*c91a719dSKyungmin Park 			if (!seb->scrub) {
1522*c91a719dSKyungmin Park 				dbg_wl("add PEB %d EC %d to the used tree",
1523*c91a719dSKyungmin Park 				       e->pnum, e->ec);
1524*c91a719dSKyungmin Park 				wl_tree_add(e, &ubi->used);
1525*c91a719dSKyungmin Park 			} else {
1526*c91a719dSKyungmin Park 				dbg_wl("add PEB %d EC %d to the scrub tree",
1527*c91a719dSKyungmin Park 				       e->pnum, e->ec);
1528*c91a719dSKyungmin Park 				wl_tree_add(e, &ubi->scrub);
1529*c91a719dSKyungmin Park 			}
1530*c91a719dSKyungmin Park 		}
1531*c91a719dSKyungmin Park 	}
1532*c91a719dSKyungmin Park 
1533*c91a719dSKyungmin Park 	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1534*c91a719dSKyungmin Park 		ubi_err("not enough physical eraseblocks (%d, need %d)",
1535*c91a719dSKyungmin Park 			ubi->avail_pebs, WL_RESERVED_PEBS);
1536*c91a719dSKyungmin Park 		goto out_free;
1537*c91a719dSKyungmin Park 	}
1538*c91a719dSKyungmin Park 	ubi->avail_pebs -= WL_RESERVED_PEBS;
1539*c91a719dSKyungmin Park 	ubi->rsvd_pebs += WL_RESERVED_PEBS;
1540*c91a719dSKyungmin Park 
1541*c91a719dSKyungmin Park 	/* Schedule wear-leveling if needed */
1542*c91a719dSKyungmin Park 	err = ensure_wear_leveling(ubi);
1543*c91a719dSKyungmin Park 	if (err)
1544*c91a719dSKyungmin Park 		goto out_free;
1545*c91a719dSKyungmin Park 
1546*c91a719dSKyungmin Park 	return 0;
1547*c91a719dSKyungmin Park 
1548*c91a719dSKyungmin Park out_free:
1549*c91a719dSKyungmin Park 	cancel_pending(ubi);
1550*c91a719dSKyungmin Park 	tree_destroy(&ubi->used);
1551*c91a719dSKyungmin Park 	tree_destroy(&ubi->free);
1552*c91a719dSKyungmin Park 	tree_destroy(&ubi->scrub);
1553*c91a719dSKyungmin Park 	kfree(ubi->lookuptbl);
1554*c91a719dSKyungmin Park 	return err;
1555*c91a719dSKyungmin Park }
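
/*
 * Illustrative summary of the classification done above:
 *
 *	si->erase, si->corr   -> scheduled for erasure via 'schedule_erase()'
 *	si->free              -> added to the ubi->free tree
 *	per-volume sv->root   -> ubi->used tree, or ubi->scrub tree when
 *	                         'seb->scrub' is set
 */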
1556*c91a719dSKyungmin Park 
1557*c91a719dSKyungmin Park /**
1558*c91a719dSKyungmin Park  * protection_trees_destroy - destroy the protection RB-trees.
1559*c91a719dSKyungmin Park  * @ubi: UBI device description object
1560*c91a719dSKyungmin Park  */
1561*c91a719dSKyungmin Park static void protection_trees_destroy(struct ubi_device *ubi)
1562*c91a719dSKyungmin Park {
1563*c91a719dSKyungmin Park 	struct rb_node *rb;
1564*c91a719dSKyungmin Park 	struct ubi_wl_prot_entry *pe;
1565*c91a719dSKyungmin Park 
1566*c91a719dSKyungmin Park 	rb = ubi->prot.aec.rb_node;
1567*c91a719dSKyungmin Park 	while (rb) {
1568*c91a719dSKyungmin Park 		if (rb->rb_left)
1569*c91a719dSKyungmin Park 			rb = rb->rb_left;
1570*c91a719dSKyungmin Park 		else if (rb->rb_right)
1571*c91a719dSKyungmin Park 			rb = rb->rb_right;
1572*c91a719dSKyungmin Park 		else {
1573*c91a719dSKyungmin Park 			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1574*c91a719dSKyungmin Park 
1575*c91a719dSKyungmin Park 			rb = rb_parent(rb);
1576*c91a719dSKyungmin Park 			if (rb) {
1577*c91a719dSKyungmin Park 				if (rb->rb_left == &pe->rb_aec)
1578*c91a719dSKyungmin Park 					rb->rb_left = NULL;
1579*c91a719dSKyungmin Park 				else
1580*c91a719dSKyungmin Park 					rb->rb_right = NULL;
1581*c91a719dSKyungmin Park 			}
1582*c91a719dSKyungmin Park 
1583*c91a719dSKyungmin Park 			kmem_cache_free(ubi_wl_entry_slab, pe->e);
1584*c91a719dSKyungmin Park 			kfree(pe);
1585*c91a719dSKyungmin Park 		}
1586*c91a719dSKyungmin Park 	}
1587*c91a719dSKyungmin Park }
1588*c91a719dSKyungmin Park 
1589*c91a719dSKyungmin Park /**
1590*c91a719dSKyungmin Park  * ubi_wl_close - close the wear-leveling unit.
1591*c91a719dSKyungmin Park  * @ubi: UBI device description object
1592*c91a719dSKyungmin Park  */
1593*c91a719dSKyungmin Park void ubi_wl_close(struct ubi_device *ubi)
1594*c91a719dSKyungmin Park {
1595*c91a719dSKyungmin Park 	dbg_wl("close the UBI wear-leveling unit");
1596*c91a719dSKyungmin Park 
1597*c91a719dSKyungmin Park 	cancel_pending(ubi);
1598*c91a719dSKyungmin Park 	protection_trees_destroy(ubi);
1599*c91a719dSKyungmin Park 	tree_destroy(&ubi->used);
1600*c91a719dSKyungmin Park 	tree_destroy(&ubi->free);
1601*c91a719dSKyungmin Park 	tree_destroy(&ubi->scrub);
1602*c91a719dSKyungmin Park 	kfree(ubi->lookuptbl);
1603*c91a719dSKyungmin Park }
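
/*
 * Note: the teardown order above matters: pending works are cancelled
 * first so no worker can touch the trees, and only then are the trees and
 * the lookup table they reference destroyed.
 */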
1604*c91a719dSKyungmin Park 
1605*c91a719dSKyungmin Park #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1606*c91a719dSKyungmin Park 
1607*c91a719dSKyungmin Park /**
1608*c91a719dSKyungmin Park  * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1610*c91a719dSKyungmin Park  * @ubi: UBI device description object
1611*c91a719dSKyungmin Park  * @pnum: the physical eraseblock number to check
1612*c91a719dSKyungmin Park  * @ec: the erase counter to check
1613*c91a719dSKyungmin Park  *
1614*c91a719dSKyungmin Park  * This function returns zero if the erase counter of physical eraseblock @pnum
1615*c91a719dSKyungmin Park  * is equivalent to @ec, %1 if not, and a negative error code if an error
1616*c91a719dSKyungmin Park  * occurred.
1617*c91a719dSKyungmin Park  */
1618*c91a719dSKyungmin Park static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1619*c91a719dSKyungmin Park {
1620*c91a719dSKyungmin Park 	int err;
1621*c91a719dSKyungmin Park 	long long read_ec;
1622*c91a719dSKyungmin Park 	struct ubi_ec_hdr *ec_hdr;
1623*c91a719dSKyungmin Park 
1624*c91a719dSKyungmin Park 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1625*c91a719dSKyungmin Park 	if (!ec_hdr)
1626*c91a719dSKyungmin Park 		return -ENOMEM;
1627*c91a719dSKyungmin Park 
1628*c91a719dSKyungmin Park 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1629*c91a719dSKyungmin Park 	if (err && err != UBI_IO_BITFLIPS) {
1630*c91a719dSKyungmin Park 		/* The header does not have to exist */
1631*c91a719dSKyungmin Park 		err = 0;
1632*c91a719dSKyungmin Park 		goto out_free;
1633*c91a719dSKyungmin Park 	}
1634*c91a719dSKyungmin Park 
1635*c91a719dSKyungmin Park 	read_ec = be64_to_cpu(ec_hdr->ec);
1636*c91a719dSKyungmin Park 	if (ec != read_ec) {
1637*c91a719dSKyungmin Park 		ubi_err("paranoid check failed for PEB %d", pnum);
1638*c91a719dSKyungmin Park 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
1639*c91a719dSKyungmin Park 		ubi_dbg_dump_stack();
1640*c91a719dSKyungmin Park 		err = 1;
1641*c91a719dSKyungmin Park 	} else
1642*c91a719dSKyungmin Park 		err = 0;
1643*c91a719dSKyungmin Park 
1644*c91a719dSKyungmin Park out_free:
1645*c91a719dSKyungmin Park 	kfree(ec_hdr);
1646*c91a719dSKyungmin Park 	return err;
1647*c91a719dSKyungmin Park }
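
/*
 * Usage sketch (illustrative, available only under
 * CONFIG_MTD_UBI_DEBUG_PARANOID): verifying an entry from the lookup
 * table before trusting its in-memory erase counter; the caller's error
 * handling here is an assumption:
 *
 *	e = ubi->lookuptbl[pnum];
 *	if (paranoid_check_ec(ubi, e->pnum, e->ec))
 *		return -EINVAL;
 */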
1648*c91a719dSKyungmin Park 
1649*c91a719dSKyungmin Park /**
1650*c91a719dSKyungmin Park  * paranoid_check_in_wl_tree - make sure a wear-leveling entry is in a WL RB-tree.
1652*c91a719dSKyungmin Park  * @e: the wear-leveling entry to check
1653*c91a719dSKyungmin Park  * @root: the root of the tree
1654*c91a719dSKyungmin Park  *
1655*c91a719dSKyungmin Park  * This function returns zero if @e is in the @root RB-tree and %1 if it
1656*c91a719dSKyungmin Park  * is not.
1657*c91a719dSKyungmin Park  */
1658*c91a719dSKyungmin Park static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1659*c91a719dSKyungmin Park 				     struct rb_root *root)
1660*c91a719dSKyungmin Park {
1661*c91a719dSKyungmin Park 	if (in_wl_tree(e, root))
1662*c91a719dSKyungmin Park 		return 0;
1663*c91a719dSKyungmin Park 
1664*c91a719dSKyungmin Park 	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
1665*c91a719dSKyungmin Park 		e->pnum, e->ec, root);
1666*c91a719dSKyungmin Park 	ubi_dbg_dump_stack();
1667*c91a719dSKyungmin Park 	return 1;
1668*c91a719dSKyungmin Park }
1669*c91a719dSKyungmin Park 
1670*c91a719dSKyungmin Park #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */