Lines matching references to 'e' (a struct ubi_wl_entry pointer) in the UBI wear-leveling code, drivers/mtd/ubi/wl.c

133 				 struct ubi_wl_entry *e, struct rb_root *root);
135 struct ubi_wl_entry *e);
145 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) in wl_tree_add() argument
156 if (e->ec < e1->ec) in wl_tree_add()
158 else if (e->ec > e1->ec) in wl_tree_add()
161 ubi_assert(e->pnum != e1->pnum); in wl_tree_add()
162 if (e->pnum < e1->pnum) in wl_tree_add()
169 rb_link_node(&e->u.rb, parent, p); in wl_tree_add()
170 rb_insert_color(&e->u.rb, root); in wl_tree_add()
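
The cluster above is the ordered-insert path of UBI's wear-leveling trees: entries are keyed primarily by erase counter (ec), with the physical eraseblock number (pnum) breaking ties so no two entries ever compare equal. Below is a minimal userspace sketch of that ordering, using an unbalanced binary search tree in place of the kernel's red-black tree; struct wl_entry is a simplified stand-in, not UBI's real layout, though its union deliberately mirrors the kernel's e->u (an entry is linked into exactly one tree or into the protection queue, never both).

#include <stdio.h>

/* Simplified stand-in for struct ubi_wl_entry (an assumption for this
 * sketch).  The link field is a union, as in the kernel: an entry sits
 * in one rb-tree or in the protection queue at any given time. */
struct wl_entry {
    int pnum;                       /* physical eraseblock number */
    long long ec;                   /* erase counter */
    union {
        struct { struct wl_entry *left, *right; } rb;
        struct wl_entry *next;      /* protection-queue linkage */
    } u;
};

/* Insert keyed by (ec, pnum), mirroring wl_tree_add()'s comparisons. */
static void wl_tree_add(struct wl_entry **root, struct wl_entry *e)
{
    while (*root) {
        struct wl_entry *e1 = *root;

        if (e->ec < e1->ec)
            root = &e1->u.rb.left;
        else if (e->ec > e1->ec)
            root = &e1->u.rb.right;
        else if (e->pnum < e1->pnum)    /* equal ec: pnum decides */
            root = &e1->u.rb.left;
        else
            root = &e1->u.rb.right;
    }
    e->u.rb.left = e->u.rb.right = NULL;
    *root = e;
}

int main(void)
{
    struct wl_entry a = { .pnum = 3, .ec = 10 };
    struct wl_entry b = { .pnum = 1, .ec = 10 };
    struct wl_entry *root = NULL;

    wl_tree_add(&root, &a);
    wl_tree_add(&root, &b);     /* same ec, lower pnum: goes left */
    printf("root PEB %d, left child PEB %d\n",
           root->pnum, root->u.rb.left->pnum);
    return 0;
}

Keying on ec first is what lets the allocator reach the least-worn block via the leftmost node (rb_first() in the kernel); the pnum tiebreak only guarantees a total order. The later sketches reuse this struct wl_entry and omit main for brevity.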
181 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
183 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
184 kmem_cache_free(ubi_wl_entry_slab, e); in wl_entry_destroy()
242 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) in in_wl_tree() argument
252 if (e->pnum == e1->pnum) { in in_wl_tree()
253 ubi_assert(e == e1); in in_wl_tree()
257 if (e->ec < e1->ec) in in_wl_tree()
259 else if (e->ec > e1->ec) in in_wl_tree()
262 ubi_assert(e->pnum != e1->pnum); in in_wl_tree()
263 if (e->pnum < e1->pnum) in in_wl_tree()
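
in_wl_tree() is the matching membership test: it descends by the same (ec, pnum) key, and a hit on pnum must be the very same entry object (hence the assert in the real code). A sketch against the simplified BST from the block above:

/* Membership test descending by the same (ec, pnum) key the insert
 * used.  Reuses struct wl_entry from the previous sketch. */
static int in_wl_tree(struct wl_entry *root, struct wl_entry *e)
{
    while (root) {
        if (e->pnum == root->pnum)
            return e == root;   /* same PEB must be the same entry */
        if (e->ec < root->ec)
            root = root->u.rb.left;
        else if (e->ec > root->ec)
            root = root->u.rb.right;
        else if (e->pnum < root->pnum)
            root = root->u.rb.left;
        else
            root = root->u.rb.right;
    }
    return 0;
}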
283 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
290 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
291 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); in prot_queue_add()
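
prot_queue_add() parks a freshly handed-out PEB in the protection queue so the wear-leveling worker leaves it alone while the caller is still writing to it. The queue is a circular array of lists: new entries go into the slot just behind pq_head, so they survive roughly one full revolution of the head. A sketch, with an assumed queue length and the wl_entry type from above:

#define PROT_QUEUE_LEN 10       /* assumed; kernel: UBI_PROT_QUEUE_LEN */

struct pq {
    struct wl_entry *slot[PROT_QUEUE_LEN];  /* one list per age slot */
    int head;                               /* next slot to expire */
};

/* Add behind the head so the entry stays protected for roughly
 * PROT_QUEUE_LEN calls of serve_prot_queue() (sketched further below). */
static void prot_queue_add(struct pq *pq, struct wl_entry *e)
{
    int tail = pq->head - 1;

    if (tail < 0)
        tail = PROT_QUEUE_LEN - 1;
    e->u.next = pq->slot[tail];
    pq->slot[tail] = e;
}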
307 struct ubi_wl_entry *e, *prev_e = NULL; in find_wl_entry() local
310 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_wl_entry()
311 max = e->ec + diff; in find_wl_entry()
322 prev_e = e; in find_wl_entry()
323 e = e1; in find_wl_entry()
331 !ubi->fm && e->pnum < UBI_FM_MAX_START) in find_wl_entry()
334 return e; in find_wl_entry()
349 struct ubi_wl_entry *e, *first, *last; in find_mean_wl_entry() local
355 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
360 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
362 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
364 return e; in find_mean_wl_entry()
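
find_wl_entry() implements the allocation policy: rather than always taking the least-worn block, it returns the entry with the highest erase counter that still lies within diff of the minimum, and find_mean_wl_entry() halves that window (WL_FREE_MAX_DIFF/2) to get a "medium-worn" block; the UBI_FM_MAX_START check additionally holds low-numbered PEBs back as fastmap anchor candidates. A sketch of the selection rule on the simplified BST, with the fastmap reservation left out and an assumed constant:

#define WL_FREE_MAX_DIFF (2 * 4096)     /* assumed; the kernel derives
                                         * this from its WL threshold */

static struct wl_entry *tree_first(struct wl_entry *p)
{
    while (p && p->u.rb.left)
        p = p->u.rb.left;               /* cf. rb_first(): lowest ec */
    return p;
}

/* Highest-ec entry still below min_ec + diff: worn enough to be worth
 * handing out, but never far from the least-worn block. */
static struct wl_entry *find_wl_entry(struct wl_entry *root, int diff)
{
    struct wl_entry *best = tree_first(root);
    struct wl_entry *p = root;
    long long max;

    if (!best)
        return NULL;
    max = best->ec + diff;
    while (p) {
        if (p->ec >= max) {
            p = p->u.rb.left;           /* too worn, look lower */
        } else {
            best = p;                   /* candidate, try higher */
            p = p->u.rb.right;
        }
    }
    return best;
}

The real find_mean_wl_entry() first checks whether the tree's ec spread even exceeds WL_FREE_MAX_DIFF; if not, the root entry is already "mean enough" and the search is skipped.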
377 struct ubi_wl_entry *e; in wl_get_wle() local
379 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
380 if (!e) { in wl_get_wle()
385 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
391 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
393 dbg_wl("PEB %d EC %d", e->pnum, e->ec); in wl_get_wle()
395 return e; in wl_get_wle()
408 struct ubi_wl_entry *e; in prot_queue_del() local
410 e = ubi->lookuptbl[pnum]; in prot_queue_del()
411 if (!e) in prot_queue_del()
414 if (self_check_in_pq(ubi, e)) in prot_queue_del()
417 list_del(&e->u.list); in prot_queue_del()
418 dbg_wl("deleted PEB %d from the protection queue", e->pnum); in prot_queue_del()
431 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
436 unsigned long long ec = e->ec; in sync_erase()
438 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); in sync_erase()
440 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
448 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
459 e->pnum, ec); in sync_erase()
464 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); in sync_erase()
468 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
472 e->ec = ec; in sync_erase()
474 if (e->ec > ubi->max_ec) in sync_erase()
475 ubi->max_ec = e->ec; in sync_erase()
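
sync_erase() shows the erase-counter bookkeeping: the low-level erase reports how many physical erasures it actually performed (torture testing erases more than once), the counter must not overflow, the new EC is written back into the block's EC header before the in-memory entry is updated, and the device-wide maximum is tracked. A sketch with stubbed-out I/O; do_erase() and write_ec_hdr() are hypothetical stand-ins for ubi_io_sync_erase()/ubi_io_write_ec_hdr(), and the overflow limit is assumed:

#include <errno.h>

#define MAX_ERASECOUNTER 0x7FFFFFFFLL   /* assumed overflow limit */

/* Hypothetical MTD stand-ins: report the number of erasures done, and
 * pretend the EC header write always succeeds. */
static int do_erase(int pnum, int torture)
{
    (void)pnum;
    return torture ? 3 : 1;
}
static int write_ec_hdr(int pnum, long long ec)
{
    (void)pnum; (void)ec;
    return 0;
}

static long long max_ec;                /* cf. ubi->max_ec */

static int sync_erase(struct wl_entry *e, int torture)
{
    long long ec = e->ec;
    int err = do_erase(e->pnum, torture);

    if (err < 0)
        return err;
    ec += err;                          /* one bump per erasure */
    if (ec > MAX_ERASECOUNTER)
        return -EINVAL;                 /* erase counter overflow */
    err = write_ec_hdr(e->pnum, ec);    /* persist before trusting it */
    if (err)
        return err;
    e->ec = ec;
    if (ec > max_ec)
        max_ec = ec;
    return 0;
}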
493 struct ubi_wl_entry *e, *tmp; in serve_prot_queue() local
503 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
505 e->pnum, e->ec); in serve_prot_queue()
507 list_del(&e->u.list); in serve_prot_queue()
508 wl_tree_add(e, &ubi->used); in serve_prot_queue()
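
serve_prot_queue() is the aging tick that matches prot_queue_add() above: every entry parked in the current head slot drops out of protection and goes back into the used tree, then the head advances one slot, so protection lasts about PROT_QUEUE_LEN ticks. A sketch building on the pq and wl_tree_add() sketches; the kernel version additionally batches its work and drops its spinlock periodically:

/* One aging tick: expire the head slot into the used tree, advance. */
static void serve_prot_queue(struct pq *pq, struct wl_entry **used)
{
    struct wl_entry *e = pq->slot[pq->head];

    pq->slot[pq->head] = NULL;
    while (e) {
        struct wl_entry *next = e->u.next;

        wl_tree_add(used, e);   /* u switches to tree linkage here */
        e = next;
    }
    pq->head = (pq->head + 1) % PROT_QUEUE_LEN;
}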
604 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
609 ubi_assert(e); in schedule_erase()
612 e->pnum, e->ec, torture); in schedule_erase()
619 wl_wrk->e = e; in schedule_erase()
641 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
646 dbg_wl("sync erase of PEB %i", e->pnum); in do_sync_erase()
652 wl_wrk->e = e; in do_sync_erase()
1064 struct ubi_wl_entry *e = wl_wrk->e; local
1065 int pnum = e->pnum;
1071 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1073 wl_entry_destroy(ubi, e);
1078 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1080 err = sync_erase(ubi, e, wl_wrk->torture);
1086 wl_tree_add(e, &ubi->free);
1109 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1117 wl_entry_destroy(ubi, e);
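
schedule_erase() packs the entry into a work item for the background thread, do_sync_erase() runs the same work synchronously, and the 1064-block is the erase worker's outcome handling: cancellation destroys the entry, success returns the PEB to the free tree, and a failed erase is first retried by re-scheduling before the entry is given up on. A condensed sketch of that flow; schedule_erase() here is a hypothetical always-succeeding stub standing in for the kernel's work queue, and it reuses sync_erase() from the sketch above:

#include <stdlib.h>

/* Hypothetical stand-in for queueing erase work on the UBI thread. */
static int schedule_erase(struct wl_entry *e, int torture)
{
    (void)e; (void)torture;
    return 0;                           /* pretend the work was queued */
}

/* Outcome handling modeled on the erase worker: free tree on success,
 * retry on failure, destroy the entry only when retrying also fails. */
static int erase_worker(struct wl_entry *e, struct wl_entry **free_tree,
                        int torture)
{
    int err = sync_erase(e, torture);

    if (!err) {
        wl_tree_add(free_tree, e);      /* PEB is usable again */
        return 0;
    }
    if (schedule_erase(e, torture) == 0)
        return 0;                       /* try again later */
    free(e);    /* cf. wl_entry_destroy(); the kernel may also mark
                 * the block bad on fatal I/O errors */
    return err;
}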
1203 struct ubi_wl_entry *e; local
1213 e = ubi->lookuptbl[pnum];
1214 if (e == ubi->move_from) {
1227 } else if (e == ubi->move_to) {
1244 if (in_wl_tree(e, &ubi->used)) {
1245 self_check_in_wl_tree(ubi, e, &ubi->used);
1246 rb_erase(&e->u.rb, &ubi->used);
1247 } else if (in_wl_tree(e, &ubi->scrub)) {
1248 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1249 rb_erase(&e->u.rb, &ubi->scrub);
1250 } else if (in_wl_tree(e, &ubi->erroneous)) {
1251 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1252 rb_erase(&e->u.rb, &ubi->erroneous);
1258 err = prot_queue_del(ubi, e->pnum);
1270 err = schedule_erase(ubi, e, vol_id, lnum, torture);
1273 wl_tree_add(e, &ubi->used);
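
The 1203-block is the PEB return path (ubi_wl_put_peb()): a block coming back from a volume may be the source or target of an in-flight wear-leveling move; otherwise it is unhooked from whichever container currently tracks it (used, scrub, or erroneous tree, or the protection queue) and its erasure is scheduled, and if scheduling fails it goes back onto the used tree so it is not lost. The piece the earlier sketches are missing is tree removal, which the kernel gets for free from rb_erase(); here is a BST unlink by the same (ec, pnum) key:

/* Unlink e from the simplified BST: descend by (ec, pnum) as the
 * insert did, then splice out via the in-order successor. */
static void wl_tree_del(struct wl_entry **root, struct wl_entry *e)
{
    while (*root != e) {
        struct wl_entry *e1 = *root;

        if (e->ec < e1->ec ||
            (e->ec == e1->ec && e->pnum < e1->pnum))
            root = &e1->u.rb.left;
        else
            root = &e1->u.rb.right;
    }
    if (!e->u.rb.left) {
        *root = e->u.rb.right;
    } else if (!e->u.rb.right) {
        *root = e->u.rb.left;
    } else {
        struct wl_entry **sp = &e->u.rb.right, *s;

        while ((*sp)->u.rb.left)
            sp = &(*sp)->u.rb.left;     /* in-order successor */
        s = *sp;
        *sp = s->u.rb.right;
        s->u.rb.left = e->u.rb.left;
        s->u.rb.right = e->u.rb.right;
        *root = s;
    }
}

With that helper, the sketched put path reduces to wl_tree_del(&used, e) (or queue removal) followed by schedule_erase(e, torture). The 1293-block (ubi_wl_scrub_peb()) is the analogous move from the used tree into the scrub tree.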
1293 struct ubi_wl_entry *e; local
1299 e = ubi->lookuptbl[pnum];
1300 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1301 in_wl_tree(e, &ubi->erroneous)) {
1306 if (e == ubi->move_to) {
1319 if (in_wl_tree(e, &ubi->used)) {
1320 self_check_in_wl_tree(ubi, e, &ubi->used);
1321 rb_erase(&e->u.rb, &ubi->used);
1325 err = prot_queue_del(ubi, e->pnum);
1334 wl_tree_add(e, &ubi->scrub);
1415 struct ubi_wl_entry *e; local
1424 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1428 if (rb->rb_left == &e->u.rb)
1434 wl_entry_destroy(ubi, e);
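
The 1415-block is the teardown walk: the kernel frees a whole tree iteratively, using each node's parent link and the rb->rb_left == &e->u.rb test to tell which child it is returning from, since deep recursion is unwelcome on kernel stacks. On the simplified BST the same post-order idea is clearest recursively:

/* Post-order free of the whole tree.  wl_entry_destroy() additionally
 * clears the entry's lookuptbl slot before freeing, as seen above. */
static void tree_destroy(struct wl_entry *root)
{
    if (!root)
        return;
    tree_destroy(root->u.rb.left);
    tree_destroy(root->u.rb.right);
    free(root);
}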
1534 struct ubi_wl_entry *e; local
1558 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1559 if (!e)
1562 e->pnum = aeb->pnum;
1563 e->ec = aeb->ec;
1564 ubi->lookuptbl[e->pnum] = e;
1565 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1566 wl_entry_destroy(ubi, e);
1576 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1577 if (!e)
1580 e->pnum = aeb->pnum;
1581 e->ec = aeb->ec;
1582 ubi_assert(e->ec >= 0);
1584 wl_tree_add(e, &ubi->free);
1587 ubi->lookuptbl[e->pnum] = e;
1596 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1597 if (!e)
1600 e->pnum = aeb->pnum;
1601 e->ec = aeb->ec;
1602 ubi->lookuptbl[e->pnum] = e;
1606 e->pnum, e->ec);
1607 wl_tree_add(e, &ubi->used);
1610 e->pnum, e->ec);
1611 wl_tree_add(e, &ubi->scrub);
1625 e = ubi->fm->e[i];
1626 ubi->lookuptbl[e->pnum] = e;
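
The 1534-block is attach-time initialization (ubi_wl_init()): every PEB reported by the attach scan gets a wl_entry, is registered in the pnum-indexed lookuptbl, and lands in the container matching its scanned state: the free tree, the used tree, the scrub tree, or straight into erase scheduling, with the entry destroyed if even scheduling fails; fastmap-owned PEBs (ubi->fm->e[i]) are only entered into the lookup table. A sketch with an assumed, simplified attach record, building on the helpers and includes from the sketches above:

enum peb_state { PEB_FREE, PEB_USED, PEB_NEEDS_SCRUB, PEB_ERASE };

struct attach_rec { int pnum; long long ec; enum peb_state state; };

struct wl_state {
    struct wl_entry *free, *used, *scrub;
    struct wl_entry **lookuptbl;    /* pnum -> entry, cf. ubi->lookuptbl */
    struct pq pq;
};

static int wl_init(struct wl_state *wl, const struct attach_rec *rec, int n)
{
    for (int i = 0; i < n; i++) {
        struct wl_entry *e = calloc(1, sizeof(*e));

        if (!e)
            return -ENOMEM;
        e->pnum = rec[i].pnum;
        e->ec = rec[i].ec;
        wl->lookuptbl[e->pnum] = e;

        switch (rec[i].state) {
        case PEB_FREE:        wl_tree_add(&wl->free, e);  break;
        case PEB_USED:        wl_tree_add(&wl->used, e);  break;
        case PEB_NEEDS_SCRUB: wl_tree_add(&wl->scrub, e); break;
        case PEB_ERASE:
            if (schedule_erase(e, 0)) {     /* stub from above */
                wl->lookuptbl[e->pnum] = NULL;
                free(e);            /* cf. wl_entry_destroy() */
                return -1;
            }
            break;
        }
    }
    return 0;
}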
1669 struct ubi_wl_entry *e, *tmp; local
1672 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1673 list_del(&e->u.list);
1674 wl_entry_destroy(ubi, e);
1750 struct ubi_wl_entry *e, struct rb_root *root) argument
1755 if (in_wl_tree(e, root))
1759 e->pnum, e->ec, root);
1773 struct ubi_wl_entry *e) argument
1783 if (p == e)
1787 e->pnum, e->ec);
1794 struct ubi_wl_entry *e; local
1796 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1797 self_check_in_wl_tree(ubi, e, &ubi->free);
1800 rb_erase(&e->u.rb, &ubi->free);
1802 return e;
1843 struct ubi_wl_entry *e; local
1866 e = wl_get_wle(ubi);
1867 prot_queue_add(ubi, e);
1870 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1873 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
1877 return e->pnum;
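
The final two blocks are the consumers: the 1794-block takes a suitably worn free entry for wear-leveling itself using the full WL_FREE_MAX_DIFF window, while ubi_wl_get_peb() (the 1843-block) serves ordinary allocations, immediately parking the chosen PEB in the protection queue and, in debug builds, verifying that the block really contains only 0xFF before handing out its number. A sketch tying the helpers above together; returning -1 stands in for the kernel's "wait until a free PEB is produced" path:

/* Allocation path: pick a medium-worn free entry, unlink it from the
 * free tree, protect it, and return its PEB number. */
static int wl_get_peb(struct wl_state *wl)
{
    struct wl_entry *e = find_wl_entry(wl->free, WL_FREE_MAX_DIFF / 2);

    if (!e)
        return -1;
    wl_tree_del(&wl->free, e);
    prot_queue_add(&wl->pq, e);
    /* debug kernels self-check here that the PEB is all 0xFF */
    return e->pnum;
}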