xref: /OK3568_Linux_fs/kernel/drivers/mtd/ubi/fastmap-wl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

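/*
 * Note: like the mainline version of this file, this code appears to be
 * built by direct inclusion from wl.c (under CONFIG_MTD_UBI_FASTMAP),
 * which is why there are no #include directives here and why static wl.c
 * helpers such as wl_tree_add(), wl_get_wle(), find_wl_entry() and
 * schedule_erase() are visible below.
 */
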
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to look in
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

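/*
 * Note on find_anchor_wl_entry(): the anchor candidate must have a PEB
 * number below UBI_FM_MAX_START because, on attach, only the first
 * UBI_FM_MAX_START PEBs are scanned for the fastmap super block. Among
 * those candidates the entry with the lowest erase counter is picked to
 * spread wear across the anchor area.
 */
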
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

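/*
 * Note on return_unused_pool_pebs(): entries pool->pebs[0..used-1] were
 * already handed out to users, so only the untouched tail of the pool,
 * pool->pebs[used..size-1], is put back into the free tree.
 */
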
/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/*
 * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling wear leveling pool
 *
 * This helper function checks whether there are enough free pebs (deducting
 * the fastmap pebs) to fill fm_pool and fm_wl_pool; this rule only applies
 * once at least one free peb has been filled into fm_wl_pool.
 * For the wear leveling pool, UBI should also reserve free pebs for bad peb
 * handling, because otherwise there may not be enough free pebs left for
 * user volumes after new bad pebs are produced.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
	int fm_used = 0;	/* fastmap non-anchor pebs */
	int beb_rsvd_pebs;

	if (!ubi->free.rb_node)
		return false;

	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - beb_rsvd_pebs > fm_used;
}

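/*
 * Worked example for has_enough_free_count(), under the assumption of a
 * fastmap that spans two LEBs: fm_used = ubi->fm_size / ubi->leb_size - 1
 * = 2 - 1 = 1, i.e. one non-anchor fastmap data PEB must stay available
 * (the anchor PEB itself is reserved separately via ubi->fm_anchor). The
 * pools may then only be filled while free_count - beb_rsvd_pebs > 1.
 */
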
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
	}

	/*
	 * All available PEBs are in ubi->free, now is the time to get
	 * the best anchor PEBs.
	 */
	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!has_enough_free_count(ubi, false))
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!has_enough_free_count(ubi, true))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

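/*
 * Note on ubi_refill_pools(): the unused remainder of both pools (and the
 * old anchor) is returned to ubi->free first so that the new anchor can
 * be chosen from the complete set of free PEBs; the loop then fills
 * fm_pool and fm_wl_pool alternately so that neither pool can starve the
 * other when free PEBs run short.
 */
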
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

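/*
 * Note on ubi_wl_get_peb(): every return path, including the error paths,
 * either keeps or re-acquires ubi->fm_eba_sem in read mode before
 * returning, so the caller always ends up holding the semaphore as
 * documented above and must release it itself.
 */
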
/*
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

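/*
 * Note on get_peb_for_wl(): a NULL return is a transient condition, not a
 * hard error. The scheduled fm_work refills the pools via
 * ubi_update_fastmap(), and the wear-leveling code is expected to back
 * off and retry once that has happened.
 */
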
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->fm_do_produce_anchor = 1;
	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

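/*
 * Note on ubi_ensure_anchor_pebs(): setting ubi->fm_do_produce_anchor
 * before scheduling the worker is the handshake with wl.c; the
 * wear-leveling worker is expected to check this flag and free up a
 * low-numbered PEB that can serve as the next fastmap anchor.
 */
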
/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap for the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

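/*
 * Note on ubi_wl_put_fm_peb(): the fastmap super block always occupies
 * the first fastmap LEB, so lnum == 0 maps the erase work to the
 * UBI_FM_SB_VOLUME_ID pseudo-volume and any other lnum to
 * UBI_FM_DATA_VOLUME_ID.
 */
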
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}

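/*
 * Note on may_reserve_for_fm(): if no fastmap exists yet and the chosen
 * entry lies in the anchor range (pnum < UBI_FM_MAX_START), the function
 * hands out the in-order successor of the free tree's root node instead,
 * leaving the low-numbered PEB in the tree as a potential fastmap anchor.
 */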