Lines Matching refs:ubi
131 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
132 static int self_check_in_wl_tree(const struct ubi_device *ubi,
134 static int self_check_in_pq(const struct ubi_device *ubi,
181 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
183 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
194 static int do_work(struct ubi_device *ubi) in do_work() argument
207 down_read(&ubi->work_sem); in do_work()
208 spin_lock(&ubi->wl_lock); in do_work()
209 if (list_empty(&ubi->works)) { in do_work()
210 spin_unlock(&ubi->wl_lock); in do_work()
211 up_read(&ubi->work_sem); in do_work()
215 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
217 ubi->works_count -= 1; in do_work()
218 ubi_assert(ubi->works_count >= 0); in do_work()
219 spin_unlock(&ubi->wl_lock); in do_work()
226 err = wrk->func(ubi, wrk, 0); in do_work()
228 ubi_err(ubi, "work failed with error code %d", err); in do_work()
229 up_read(&ubi->work_sem); in do_work()
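
The do_work() lines above show the pattern UBI uses to drain its work queue: detach the oldest item from ubi->works under wl_lock, drop the lock, then run the callback; work_sem is held for reading the whole time so ubi_wl_flush() can use it as a barrier (sketched later, after the ubi_wl_flush() lines). Below is a minimal userspace model of the detach-then-run part, with a pthread mutex standing in for the spinlock; all names are illustrative, not the kernel's, and the rwsem side is omitted.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work {
            struct work *next;
            int (*func)(struct work *w);
    };

    static struct work *works;      /* pending list, oldest first */
    static int works_count;
    static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_one_work(void)
    {
            struct work *wrk;
            int err;

            pthread_mutex_lock(&wl_lock);
            if (!works) {
                    pthread_mutex_unlock(&wl_lock);
                    return 0;       /* nothing queued */
            }
            wrk = works;            /* detach the oldest item ... */
            works = wrk->next;
            works_count -= 1;
            pthread_mutex_unlock(&wl_lock);

            err = wrk->func(wrk);   /* ... and run it unlocked */
            if (err)
                    fprintf(stderr, "work failed with error code %d\n", err);
            free(wrk);              /* in the kernel the callback frees it */
            return err;
    }
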
283 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
285 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
290 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
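
prot_queue_add() parks an entry in the slot just behind pq_head, so a freshly returned PEB is not served again until the head completes a full revolution of the queue. A sketch of the wraparound indexing, assuming the kernel's UBI_PROT_QUEUE_LEN of 10:

    #define UBI_PROT_QUEUE_LEN 10   /* as in ubi.h */

    static int prot_slot(int pq_head)
    {
            int pq_tail = pq_head - 1;

            if (pq_tail < 0)
                    pq_tail = UBI_PROT_QUEUE_LEN - 1;  /* wrap */
            return pq_tail;
    }
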
303 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
330 if (prev_e && !ubi->fm_disabled && in find_wl_entry()
331 !ubi->fm && e->pnum < UBI_FM_MAX_START) in find_wl_entry()
346 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
360 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
362 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
375 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) in wl_get_wle() argument
379 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
381 ubi_err(ubi, "no free eraseblocks"); in wl_get_wle()
385 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
391 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
392 ubi->free_count--; in wl_get_wle()
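
find_wl_entry() searches the EC-sorted free tree for the entry with the highest erase counter still below lowest-EC + diff, so allocations do not always drain the least-worn block; find_mean_wl_entry() halves the window, and wl_get_wle() then detaches the pick from ubi->free. A userspace model of that selection over a sorted array instead of an RB-tree (names illustrative, n is assumed >= 1):

    struct wle { int pnum; int ec; };

    /* Pick the most-worn free block whose EC is still inside the window. */
    static const struct wle *pick_free(const struct wle *sorted_free, int n,
                                       int max_diff)
    {
            int limit = sorted_free[0].ec + max_diff;  /* [0] = lowest EC */
            const struct wle *e = &sorted_free[0];

            for (int i = 1; i < n && sorted_free[i].ec < limit; i++)
                    e = &sorted_free[i];
            return e;
    }
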
406 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
410 e = ubi->lookuptbl[pnum]; in prot_queue_del()
414 if (self_check_in_pq(ubi, e)) in prot_queue_del()
431 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
440 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
444 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
448 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
458 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", in sync_erase()
468 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
473 spin_lock(&ubi->wl_lock); in sync_erase()
474 if (e->ec > ubi->max_ec) in sync_erase()
475 ubi->max_ec = e->ec; in sync_erase()
476 spin_unlock(&ubi->wl_lock); in sync_erase()
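
sync_erase() erases the PEB, bumps the erase counter by however many erase cycles the I/O layer reports (torture mode may erase more than once), and refuses counters above the on-flash ceiling before writing the new EC header back. A sketch of just the counter update, using the UBI_MAX_ERASECOUNTER value from ubi-media.h:

    #include <stdint.h>

    #define UBI_MAX_ERASECOUNTER 0x7FFFFFFF  /* on-flash ceiling, ubi-media.h */

    static int bump_ec(uint64_t *ec, int erasures_done)
    {
            uint64_t new_ec = *ec + (uint64_t)erasures_done;

            if (new_ec > UBI_MAX_ERASECOUNTER)
                    return -1;      /* overflow: stop using this PEB */
            *ec = new_ec;
            return 0;
    }
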
491 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
502 spin_lock(&ubi->wl_lock); in serve_prot_queue()
503 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
508 wl_tree_add(e, &ubi->used); in serve_prot_queue()
514 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
520 ubi->pq_head += 1; in serve_prot_queue()
521 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
522 ubi->pq_head = 0; in serve_prot_queue()
523 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
524 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
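
serve_prot_queue() is one tick of the wheel: every entry in the slot at pq_head has served its protection time and goes back to ubi->used, then the head advances with wraparound (the kernel also drops wl_lock periodically while draining a long slot, which is why the listing shows two unlocks). A self-contained model of one tick, where release() stands in for re-inserting the entry into the used tree:

    #define UBI_PROT_QUEUE_LEN 10

    struct pe {
            struct pe *next;
            int pnum;
    };

    static void serve_one_tick(struct pe *pq[UBI_PROT_QUEUE_LEN], int *pq_head,
                               void (*release)(struct pe *e))
    {
            struct pe *e = pq[*pq_head];

            while (e) {             /* these PEBs served their time */
                    struct pe *next = e->next;
                    release(e);     /* stand-in: back to the used tree */
                    e = next;
            }
            pq[*pq_head] = NULL;

            *pq_head += 1;          /* advance the wheel, with wraparound */
            if (*pq_head == UBI_PROT_QUEUE_LEN)
                    *pq_head = 0;
    }
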
528 void ubi_do_worker(struct ubi_device *ubi) in ubi_do_worker() argument
532 if (list_empty(&ubi->works) || ubi->ro_mode || in ubi_do_worker()
533 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) in ubi_do_worker()
536 spin_lock(&ubi->wl_lock); in ubi_do_worker()
537 while (!list_empty(&ubi->works)) { in ubi_do_worker()
542 spin_unlock(&ubi->wl_lock); in ubi_do_worker()
543 err = do_work(ubi); in ubi_do_worker()
544 spin_lock(&ubi->wl_lock); in ubi_do_worker()
546 ubi_err(ubi, "%s: work failed with error code %d", in ubi_do_worker()
547 ubi->bgt_name, err); in ubi_do_worker()
550 spin_unlock(&ubi->wl_lock); in ubi_do_worker()
562 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
564 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
565 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
566 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
567 ubi->works_count += 1; in __schedule_ubi_work()
569 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
570 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
572 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
583 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
585 down_read(&ubi->work_sem); in schedule_ubi_work()
586 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
587 up_read(&ubi->work_sem); in schedule_ubi_work()
590 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
604 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
624 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
627 ubi_do_worker(ubi); in schedule_erase()
641 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
657 return erase_worker(ubi, wl_wrk, 0); in do_sync_erase()
671 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
686 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
690 mutex_lock(&ubi->move_mutex);
691 spin_lock(&ubi->wl_lock);
692 ubi_assert(!ubi->move_from && !ubi->move_to);
693 ubi_assert(!ubi->move_to_put);
695 if (!ubi->free.rb_node ||
696 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
708 !ubi->free.rb_node, !ubi->used.rb_node);
715 anchor = !anchor_pebs_avalible(&ubi->free);
718 e1 = find_anchor_wl_entry(&ubi->used);
721 e2 = get_peb_for_wl(ubi);
725 self_check_in_wl_tree(ubi, e1, &ubi->used);
726 rb_erase(&e1->u.rb, &ubi->used);
728 } else if (!ubi->scrub.rb_node) {
730 if (!ubi->scrub.rb_node) {
737 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
738 e2 = get_peb_for_wl(ubi);
747 wl_tree_add(e2, &ubi->free);
748 ubi->free_count++;
751 self_check_in_wl_tree(ubi, e1, &ubi->used);
752 rb_erase(&e1->u.rb, &ubi->used);
758 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
759 e2 = get_peb_for_wl(ubi);
763 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
764 rb_erase(&e1->u.rb, &ubi->scrub);
768 ubi->move_from = e1;
769 ubi->move_to = e2;
770 spin_unlock(&ubi->wl_lock);
783 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
811 ubi_err(ubi, "error %d while reading VID header from PEB %d",
819 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
854 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
855 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
856 ubi->erroneous_peb_count);
871 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
873 ubi_free_vid_hdr(ubi, vid_hdr);
875 spin_lock(&ubi->wl_lock);
876 if (!ubi->move_to_put) {
877 wl_tree_add(e2, &ubi->used);
880 ubi->move_from = ubi->move_to = NULL;
881 ubi->move_to_put = ubi->wl_scheduled = 0;
882 spin_unlock(&ubi->wl_lock);
884 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
887 wl_entry_destroy(ubi, e2);
898 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
904 mutex_unlock(&ubi->move_mutex);
919 spin_lock(&ubi->wl_lock);
921 prot_queue_add(ubi, e1);
923 wl_tree_add(e1, &ubi->erroneous);
924 ubi->erroneous_peb_count += 1;
926 wl_tree_add(e1, &ubi->scrub);
928 wl_tree_add(e1, &ubi->used);
929 ubi_assert(!ubi->move_to_put);
930 ubi->move_from = ubi->move_to = NULL;
931 ubi->wl_scheduled = 0;
932 spin_unlock(&ubi->wl_lock);
934 ubi_free_vid_hdr(ubi, vid_hdr);
935 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
939 mutex_unlock(&ubi->move_mutex);
944 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
947 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
949 spin_lock(&ubi->wl_lock);
950 ubi->move_from = ubi->move_to = NULL;
951 ubi->move_to_put = ubi->wl_scheduled = 0;
952 spin_unlock(&ubi->wl_lock);
954 ubi_free_vid_hdr(ubi, vid_hdr);
955 wl_entry_destroy(ubi, e1);
956 wl_entry_destroy(ubi, e2);
959 ubi_ro_mode(ubi);
960 mutex_unlock(&ubi->move_mutex);
965 ubi->wl_scheduled = 0;
966 spin_unlock(&ubi->wl_lock);
967 mutex_unlock(&ubi->move_mutex);
968 ubi_free_vid_hdr(ubi, vid_hdr);
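
At the heart of wear_leveling_worker() is a simple trigger: e1 is the least-erased used PEB (rb_first of ubi->used, so likely holding static data) and e2 a well-worn free PEB; copying the LEB is only worthwhile if their erase counters differ by at least UBI_WL_THRESHOLD. A sketch of that decision, assuming the Kconfig default threshold of 4096 (CONFIG_MTD_UBI_WL_THRESHOLD):

    #define UBI_WL_THRESHOLD 4096   /* Kconfig default, assumption here */

    /* Trigger check only; the real worker then copies the LEB via
     * ubi_eba_copy_leb() and erases the old PEB. */
    static int worth_moving(long long ec_used_min, long long ec_free)
    {
            return ec_free - ec_used_min >= UBI_WL_THRESHOLD;
    }
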
981 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
988 spin_lock(&ubi->wl_lock);
989 if (ubi->wl_scheduled)
997 if (!ubi->scrub.rb_node) {
998 if (!ubi->used.rb_node || !ubi->free.rb_node)
1008 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1009 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1017 ubi->wl_scheduled = 1;
1018 spin_unlock(&ubi->wl_lock);
1029 __schedule_ubi_work(ubi, wrk);
1032 schedule_ubi_work(ubi, wrk);
1035 schedule_ubi_work(ubi, wrk);
1036 ubi_do_worker(ubi);
1042 spin_lock(&ubi->wl_lock);
1043 ubi->wl_scheduled = 0;
1045 spin_unlock(&ubi->wl_lock);
1061 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1073 wl_entry_destroy(ubi, e);
1080 err = sync_erase(ubi, e, wl_wrk->torture);
1085 spin_lock(&ubi->wl_lock);
1086 wl_tree_add(e, &ubi->free);
1087 ubi->free_count++;
1088 spin_unlock(&ubi->wl_lock);
1094 serve_prot_queue(ubi);
1097 err = ensure_wear_leveling(ubi, 1);
1101 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1109 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1117 wl_entry_destroy(ubi, e);
1128 if (!ubi->bad_allowed) {
1129 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1133 spin_lock(&ubi->volumes_lock);
1134 if (ubi->beb_rsvd_pebs == 0) {
1135 if (ubi->avail_pebs == 0) {
1136 spin_unlock(&ubi->volumes_lock);
1137 ubi_err(ubi, "no reserved/available physical eraseblocks");
1140 ubi->avail_pebs -= 1;
1143 spin_unlock(&ubi->volumes_lock);
1145 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1146 err = ubi_io_mark_bad(ubi, pnum);
1150 spin_lock(&ubi->volumes_lock);
1151 if (ubi->beb_rsvd_pebs > 0) {
1157 ubi->avail_pebs += 1;
1160 ubi->beb_rsvd_pebs -= 1;
1162 ubi->bad_peb_count += 1;
1163 ubi->good_peb_count -= 1;
1164 ubi_calculate_reserved(ubi);
1166 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1167 else if (ubi->beb_rsvd_pebs)
1168 ubi_msg(ubi, "%d PEBs left in the reserve",
1169 ubi->beb_rsvd_pebs);
1171 ubi_warn(ubi, "last PEB from the reserve was used");
1172 spin_unlock(&ubi->volumes_lock);
1178 spin_lock(&ubi->volumes_lock);
1179 ubi->avail_pebs += 1;
1180 spin_unlock(&ubi->volumes_lock);
1182 ubi_ro_mode(ubi);
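
When erase_worker() hits a fatal erase failure it marks the PEB bad and adjusts the pools: the bad-block reserve (beb_rsvd_pebs) is consumed first, and only when it is empty does an available PEB get sacrificed. The sketch below collapses that into one step; the kernel actually splits the accounting across the two volumes_lock sections around ubi_io_mark_bad() shown above, so treat this as a simplified model.

    struct peb_counts {
            int beb_rsvd_pebs;      /* bad-block reserve */
            int avail_pebs;         /* general pool */
            int bad_peb_count;
            int good_peb_count;
    };

    static int account_bad_peb(struct peb_counts *c)
    {
            if (c->beb_rsvd_pebs > 0)
                    c->beb_rsvd_pebs -= 1;
            else if (c->avail_pebs > 0)
                    c->avail_pebs -= 1;     /* "used an available PEB" */
            else
                    return -1;              /* nowhere left to take from */

            c->bad_peb_count += 1;
            c->good_peb_count -= 1;
            return 0;
    }
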
1199 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1207 ubi_assert(pnum < ubi->peb_count);
1209 down_read(&ubi->fm_protect);
1212 spin_lock(&ubi->wl_lock);
1213 e = ubi->lookuptbl[pnum];
1214 if (e == ubi->move_from) {
1221 spin_unlock(&ubi->wl_lock);
1224 mutex_lock(&ubi->move_mutex);
1225 mutex_unlock(&ubi->move_mutex);
1227 } else if (e == ubi->move_to) {
1238 ubi_assert(!ubi->move_to_put);
1239 ubi->move_to_put = 1;
1240 spin_unlock(&ubi->wl_lock);
1241 up_read(&ubi->fm_protect);
1244 if (in_wl_tree(e, &ubi->used)) {
1245 self_check_in_wl_tree(ubi, e, &ubi->used);
1246 rb_erase(&e->u.rb, &ubi->used);
1247 } else if (in_wl_tree(e, &ubi->scrub)) {
1248 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1249 rb_erase(&e->u.rb, &ubi->scrub);
1250 } else if (in_wl_tree(e, &ubi->erroneous)) {
1251 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1252 rb_erase(&e->u.rb, &ubi->erroneous);
1253 ubi->erroneous_peb_count -= 1;
1254 ubi_assert(ubi->erroneous_peb_count >= 0);
1258 err = prot_queue_del(ubi, e->pnum);
1260 ubi_err(ubi, "PEB %d not found", pnum);
1261 ubi_ro_mode(ubi);
1262 spin_unlock(&ubi->wl_lock);
1263 up_read(&ubi->fm_protect);
1268 spin_unlock(&ubi->wl_lock);
1270 err = schedule_erase(ubi, e, vol_id, lnum, torture);
1272 spin_lock(&ubi->wl_lock);
1273 wl_tree_add(e, &ubi->used);
1274 spin_unlock(&ubi->wl_lock);
1277 up_read(&ubi->fm_protect);
1291 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1295 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1298 spin_lock(&ubi->wl_lock);
1299 e = ubi->lookuptbl[pnum];
1300 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1301 in_wl_tree(e, &ubi->erroneous)) {
1302 spin_unlock(&ubi->wl_lock);
1306 if (e == ubi->move_to) {
1313 spin_unlock(&ubi->wl_lock);
1319 if (in_wl_tree(e, &ubi->used)) {
1320 self_check_in_wl_tree(ubi, e, &ubi->used);
1321 rb_erase(&e->u.rb, &ubi->used);
1325 err = prot_queue_del(ubi, e->pnum);
1327 ubi_err(ubi, "PEB %d not found", pnum);
1328 ubi_ro_mode(ubi);
1329 spin_unlock(&ubi->wl_lock);
1334 wl_tree_add(e, &ubi->scrub);
1335 spin_unlock(&ubi->wl_lock);
1341 return ensure_wear_leveling(ubi, 0);
1356 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1366 vol_id, lnum, ubi->works_count);
1372 down_read(&ubi->work_sem);
1373 spin_lock(&ubi->wl_lock);
1374 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1378 ubi->works_count -= 1;
1379 ubi_assert(ubi->works_count >= 0);
1380 spin_unlock(&ubi->wl_lock);
1382 err = wrk->func(ubi, wrk, 0);
1384 up_read(&ubi->work_sem);
1388 spin_lock(&ubi->wl_lock);
1393 spin_unlock(&ubi->wl_lock);
1394 up_read(&ubi->work_sem);
1401 down_write(&ubi->work_sem);
1402 up_write(&ubi->work_sem);
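
The down_write()/up_write() pair at the end of ubi_wl_flush() is a barrier, not an exclusion: every worker holds work_sem for reading while it runs, so the write acquisition cannot succeed until all in-flight work has drained. The same idiom in userspace terms:

    #include <pthread.h>

    static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* Workers wrap each item in rdlock/unlock; the flusher does this: */
    static void wait_for_inflight_work(void)
    {
            pthread_rwlock_wrlock(&work_sem);  /* blocks until readers drain */
            pthread_rwlock_unlock(&work_sem);
    }
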
1412 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root) argument
1434 wl_entry_destroy(ubi, e);
1446 struct ubi_device *ubi = u; local
1448 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1449 ubi->bgt_name, task_pid_nr(current));
1461 spin_lock(&ubi->wl_lock);
1462 if (list_empty(&ubi->works) || ubi->ro_mode ||
1463 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1465 spin_unlock(&ubi->wl_lock);
1469 spin_unlock(&ubi->wl_lock);
1471 err = do_work(ubi);
1473 ubi_err(ubi, "%s: work failed with error code %d",
1474 ubi->bgt_name, err);
1480 ubi_msg(ubi, "%s: %d consecutive failures",
1481 ubi->bgt_name, WL_MAX_FAILURES);
1482 ubi_ro_mode(ubi);
1483 ubi->thread_enabled = 0;
1492 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1500 static void shutdown_work(struct ubi_device *ubi) argument
1504 flush_work(&ubi->fm_work);
1509 while (!list_empty(&ubi->works)) {
1512 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1514 wrk->func(ubi, wrk, 1);
1515 ubi->works_count -= 1;
1516 ubi_assert(ubi->works_count >= 0);
1528 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1536 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1537 spin_lock_init(&ubi->wl_lock);
1538 mutex_init(&ubi->move_mutex);
1539 init_rwsem(&ubi->work_sem);
1540 ubi->max_ec = ai->max_ec;
1541 INIT_LIST_HEAD(&ubi->works);
1543 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1546 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1547 if (!ubi->lookuptbl)
1551 INIT_LIST_HEAD(&ubi->pq[i]);
1552 ubi->pq_head = 0;
1554 ubi->free_count = 0;
1564 ubi->lookuptbl[e->pnum] = e;
1565 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1566 wl_entry_destroy(ubi, e);
1584 wl_tree_add(e, &ubi->free);
1585 ubi->free_count++;
1587 ubi->lookuptbl[e->pnum] = e;
1602 ubi->lookuptbl[e->pnum] = e;
1607 wl_tree_add(e, &ubi->used);
1611 wl_tree_add(e, &ubi->scrub);
1620 if (ubi->fm) {
1621 ubi_assert(ubi->good_peb_count ==
1622 found_pebs + ubi->fm->used_blocks);
1624 for (i = 0; i < ubi->fm->used_blocks; i++) {
1625 e = ubi->fm->e[i];
1626 ubi->lookuptbl[e->pnum] = e;
1630 ubi_assert(ubi->good_peb_count == found_pebs);
1633 ubi_fastmap_init(ubi, &reserved_pebs);
1635 if (ubi->avail_pebs < reserved_pebs) {
1636 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
1637 ubi->avail_pebs, reserved_pebs);
1638 if (ubi->corr_peb_count)
1639 ubi_err(ubi, "%d PEBs are corrupted and not used",
1640 ubi->corr_peb_count);
1643 ubi->avail_pebs -= reserved_pebs;
1644 ubi->rsvd_pebs += reserved_pebs;
1647 err = ensure_wear_leveling(ubi, 0);
1654 shutdown_work(ubi);
1655 tree_destroy(ubi, &ubi->used);
1656 tree_destroy(ubi, &ubi->free);
1657 tree_destroy(ubi, &ubi->scrub);
1658 kfree(ubi->lookuptbl);
1666 static void protection_queue_destroy(struct ubi_device *ubi) argument
1672 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1674 wl_entry_destroy(ubi, e);
1683 void ubi_wl_close(struct ubi_device *ubi) argument
1686 ubi_fastmap_close(ubi);
1687 shutdown_work(ubi);
1688 protection_queue_destroy(ubi);
1689 tree_destroy(ubi, &ubi->used);
1690 tree_destroy(ubi, &ubi->erroneous);
1691 tree_destroy(ubi, &ubi->free);
1692 tree_destroy(ubi, &ubi->scrub);
1693 kfree(ubi->lookuptbl);
1706 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
1712 if (!ubi_dbg_chk_gen(ubi))
1715 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1719 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1728 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1729 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1749 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
1752 if (!ubi_dbg_chk_gen(ubi))
1758 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1772 static int self_check_in_pq(const struct ubi_device *ubi, argument
1778 if (!ubi_dbg_chk_gen(ubi))
1782 list_for_each_entry(p, &ubi->pq[i], u.list)
1786 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1792 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) argument
1796 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1797 self_check_in_wl_tree(ubi, e, &ubi->free);
1798 ubi->free_count--;
1799 ubi_assert(ubi->free_count >= 0);
1800 rb_erase(&e->u.rb, &ubi->free);
1814 static int produce_free_peb(struct ubi_device *ubi) argument
1818 while (!ubi->free.rb_node && ubi->works_count) {
1819 spin_unlock(&ubi->wl_lock);
1822 err = do_work(ubi);
1824 spin_lock(&ubi->wl_lock);
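
produce_free_peb() is called with wl_lock held and no free PEBs available: it drops the lock, synchronously runs one queued work item (typically an erase that will replenish ubi->free), retakes the lock, and loops while work remains. A userspace model of that drop-work-retake loop (names illustrative):

    #include <pthread.h>

    static int produce_free(pthread_mutex_t *lock, const int *free_count,
                            const int *works_count, int (*do_one_work)(void))
    {
            int err;

            while (*free_count == 0 && *works_count > 0) {
                    pthread_mutex_unlock(lock);  /* work may sleep / do I/O */
                    err = do_one_work();
                    pthread_mutex_lock(lock);
                    if (err)
                            return err;
            }
            return 0;
    }
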
1840 int ubi_wl_get_peb(struct ubi_device *ubi) argument
1846 down_read(&ubi->fm_eba_sem);
1847 spin_lock(&ubi->wl_lock);
1848 if (!ubi->free.rb_node) {
1849 if (ubi->works_count == 0) {
1850 ubi_err(ubi, "no free eraseblocks");
1851 ubi_assert(list_empty(&ubi->works));
1852 spin_unlock(&ubi->wl_lock);
1856 err = produce_free_peb(ubi);
1858 spin_unlock(&ubi->wl_lock);
1861 spin_unlock(&ubi->wl_lock);
1862 up_read(&ubi->fm_eba_sem);
1866 e = wl_get_wle(ubi);
1867 prot_queue_add(ubi, e);
1868 spin_unlock(&ubi->wl_lock);
1870 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1871 ubi->peb_size - ubi->vid_hdr_aloffset);
1873 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
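
ubi_wl_get_peb() finishes with a paranoia check: everything after the VID header offset of a freshly served PEB must still read as erased flash, i.e. all 0xFF, which is what the ubi_self_check_all_ff() call above verifies. A sketch of that scan, conceptually invoked as check_all_ff(peb + vid_hdr_aloffset, peb_size - vid_hdr_aloffset):

    #include <stddef.h>
    #include <stdint.h>

    static int check_all_ff(const uint8_t *buf, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    if (buf[i] != 0xFF)
                            return -1;  /* stale data: PEB not fully erased */
            return 0;
    }
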