/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
#ifndef __UBOOT__
static void update_fastmap_work_fn(struct work_struct *wrk)
#else
void update_fastmap_work_fn(struct ubi_device *ubi)
#endif
{
#ifndef __UBOOT__
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
#endif

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

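/**
 * anchor_pebs_avalible - checks whether a fastmap anchor candidate is free.
 * @root: RB-tree of free wear-leveling entries to scan
 *
 * Returns 1 if @root holds at least one PEB below UBI_FM_MAX_START (and
 * thus usable as fastmap anchor), 0 otherwise.
 */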
static int anchor_pebs_avalible(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a free physical erase block for fastmap usage.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a free physical erase block (the least worn anchor
 * candidate if @anchor is set, otherwise one of medium wear level) and
 * removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
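
/*
 * Illustrative usage (a sketch, not code from this file): the fastmap
 * writer would take its anchor PEB under wl_lock, e.g.
 *
 *	spin_lock(&ubi->wl_lock);
 *	e = ubi_wl_get_fm_peb(ubi, 1);
 *	spin_unlock(&ubi->wl_lock);
 */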

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

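	/*
	 * Grab at most one PEB for each pool per iteration; stop once both
	 * pools are full or the free tree cannot supply any more.
	 */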
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
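			/*
			 * Never drain the free tree below the bad-PEB
			 * reserve plus a small headroom.
			 */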
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check the WL pool here because at this point we can
	 * still refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
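
/*
 * Illustrative caller pattern (a sketch, not code from this file): since
 * ubi_wl_get_peb() returns with fm_eba_sem held in read mode even on
 * failure, every caller has to drop it again, e.g.
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum >= 0)
 *		err = ubi_io_write(ubi, buf, pnum, offset, len);
 *	up_read(&ubi->fm_eba_sem);
 */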

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
#ifndef __UBOOT__
		/*
		 * We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
#else
		/*
		 * No work queues in U-Boot, we must do this immediately
		 */
		update_fastmap_work_fn(ubi);
#endif
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap for the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

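	/* A non-zero lnum means the PEB held fastmap data, not the superblock. */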
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

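/**
 * ubi_fastmap_close - releases all fastmap resources.
 * @ubi: UBI device description object
 *
 * Returns the PEBs still sitting in the fastmap pools to the free tree and
 * frees the in-memory fastmap data structure.
 */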
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
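	/*
	 * Do not hand out a potential anchor PEB while fastmap is enabled
	 * but not yet written; take the next entry from the tree instead.
	 */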
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}