1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * The parts taken from the kernel implementation are:
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Copyright (c) International Business Machines Corp., 2006
7*4882a593Smuzhiyun *
 * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <common.h>
12*4882a593Smuzhiyun #include <errno.h>
13*4882a593Smuzhiyun #include <ubispl.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <linux/crc32.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include "ubispl.h"
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun /**
20*4882a593Smuzhiyun * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
21*4882a593Smuzhiyun * @ubi: UBI device description object
22*4882a593Smuzhiyun */
ubi_calc_fm_size(struct ubi_scan_info * ubi)23*4882a593Smuzhiyun static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
24*4882a593Smuzhiyun {
25*4882a593Smuzhiyun size_t size;
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun size = sizeof(struct ubi_fm_sb) +
28*4882a593Smuzhiyun sizeof(struct ubi_fm_hdr) +
29*4882a593Smuzhiyun sizeof(struct ubi_fm_scan_pool) +
30*4882a593Smuzhiyun sizeof(struct ubi_fm_scan_pool) +
31*4882a593Smuzhiyun (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
32*4882a593Smuzhiyun (sizeof(struct ubi_fm_eba) +
33*4882a593Smuzhiyun (ubi->peb_count * sizeof(__be32))) +
34*4882a593Smuzhiyun sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
35*4882a593Smuzhiyun return roundup(size, ubi->leb_size);
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun
ubi_io_read(struct ubi_scan_info * ubi,void * buf,int pnum,unsigned long from,unsigned long len)38*4882a593Smuzhiyun static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
39*4882a593Smuzhiyun unsigned long from, unsigned long len)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun return ubi->read(pnum + ubi->peb_offset, from, len, buf);
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun
ubi_io_is_bad(struct ubi_scan_info * ubi,int peb)44*4882a593Smuzhiyun static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun return peb >= ubi->peb_count || peb < 0;
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
/**
 * ubi_io_read_vid_hdr - read and validate the VID header of a PEB.
 * @ubi:	UBI scan information
 * @pnum:	physical eraseblock number to read
 * @vh:		buffer the VID header is read into
 * @unused:	unused, kept for signature compatibility with the kernel code
 *
 * Returns 0 on success (or when the block was scanned before), UBI_IO_FF
 * for an empty (all 0xff) block, UBI_IO_BAD_HDR for a bad magic or CRC,
 * or the error returned by the underlying flash read.  Blocks failing any
 * check are recorded in the @ubi->corrupt bitmap.
 */
static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
			       struct ubi_vid_hdr *vh, int unused)
{
	u32 magic;
	int res;

	/* No point in rescanning a corrupt block */
	if (test_bit(pnum, ubi->corrupt))
		return UBI_IO_BAD_HDR;

	/*
	 * If the block has been scanned already, no need to rescan.
	 * Note the side effect: this also marks the block as scanned
	 * for all subsequent callers.
	 */
	if (test_and_set_bit(pnum, ubi->scanned))
		return 0;

	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));

	/*
	 * Bad block, unrecoverable ECC error, skip the block
	 */
	if (res) {
		ubi_dbg("Skipping bad or unreadable block %d", pnum);
		/* Invalidate the stale header data in the caller's buffer */
		vh->magic = 0;
		generic_set_bit(pnum, ubi->corrupt);
		return res;
	}

	/* Magic number available ? */
	magic = be32_to_cpu(vh->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		generic_set_bit(pnum, ubi->corrupt);
		/* All 0xff means the block is erased/empty */
		if (magic == 0xffffffff)
			return UBI_IO_FF;
		ubi_msg("Bad magic in block 0%d %08x", pnum, magic);
		return UBI_IO_BAD_HDR;
	}

	/* Header CRC correct ? */
	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
	    be32_to_cpu(vh->hdr_crc)) {
		ubi_msg("Bad CRC in block 0%d", pnum);
		generic_set_bit(pnum, ubi->corrupt);
		return UBI_IO_BAD_HDR;
	}

	ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));

	return 0;
}
98*4882a593Smuzhiyun
/*
 * Re-read the VID header of PEB @fm_pnum and verify it matches the
 * volume id, static volume type and LEB number the fastmap claims.
 *
 * A successful header read always returns 0 — a content mismatch is
 * only reported via a debug message, mirroring the original behavior.
 * Read errors are propagated unchanged.
 */
static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
				 struct ubi_vid_hdr *vh,
				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
{
	int err;

	if (ubi_io_is_bad(ubi, fm_pnum))
		return -EINVAL;

	err = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
	if (err)
		return err;

	/* Check volume id, volume type and lnum */
	if (be32_to_cpu(vh->vol_id) != fm_vol_id ||
	    vh->vol_type != UBI_VID_STATIC ||
	    be32_to_cpu(vh->lnum) != fm_lnum)
		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
			fm_pnum, fm_vol_id, vh->vol_type,
			be32_to_cpu(vh->vol_id),
			fm_lnum, be32_to_cpu(vh->lnum));

	return 0;
}
122*4882a593Smuzhiyun
/**
 * ubi_add_peb_to_vol - insert a physical block into a volume's LEB map.
 * @ubi:	UBI scan information
 * @vh:		VID header of the PEB being added
 * @vol_id:	volume the block belongs to
 * @pnum:	physical eraseblock number
 * @lnum:	logical eraseblock number within the volume
 *
 * Records @pnum as the backing PEB for LEB @lnum of volume @vol_id.  If
 * the LEB already has a PEB assigned, the conflict is resolved in favor
 * of a freshly-scanned valid header, a non-corrupt block, and finally
 * the higher sequence number.  Returns 0 on success, -EINVAL when @lnum
 * exceeds UBI_MAX_VOL_LEBS.
 */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/*
	 * If the block is already assigned, check sqnum.  Note the side
	 * effect: this also marks the LEB as found for later lookups.
	 */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block hase not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		/* First assignment for this LEB; track the volume's extent */
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}
193*4882a593Smuzhiyun
/*
 * Scan the VID header of PEB @pnum and, when it belongs to a static
 * volume we were asked to load, register it in the volume map.
 *
 * Returns UBI_FASTMAP_ANCHOR when the PEB is the fastmap super block
 * (and fastmap is enabled), 0 for ignored blocks, a negative error for
 * an invalid PEB number, or the error of the header read/registration.
 */
static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
			    u32 pnum)
{
	u32 vol_id;
	int err;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
	if (err)
		return err;

	vol_id = be32_to_cpu(vh->vol_id);

	/* The fastmap anchor is reported right away */
	if (vol_id == UBI_FM_SB_VOLUME_ID) {
		if (ubi->fm_enabled)
			return UBI_FASTMAP_ANCHOR;
		return 0;
	}

	/* Only static volumes with an id below UBI_SPL_VOL_IDS matter */
	if (vol_id >= UBI_SPL_VOL_IDS)
		return 0;
	if (vh->vol_type != UBI_VID_STATIC)
		return 0;

	/* Skip volumes the caller did not ask for */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum,
				  be32_to_cpu(vh->lnum));
}
225*4882a593Smuzhiyun
/*
 * Account a PEB listed in the fastmap and, when it belongs to a static
 * volume we want to load, scan its VID header into the volume map.
 */
static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
			    u32 vol_id, u32 vol_type, u32 used)
{
	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	ubi->fastmap_pebs++;

	/* Ignore out-of-range volumes and anything not static */
	if (vol_id >= UBI_SPL_VOL_IDS)
		return 0;
	if (vol_type != UBI_STATIC_VOLUME)
		return 0;

	/* Skip volumes the caller did not ask for */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	return ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
}
247*4882a593Smuzhiyun
/*
 * Scan every PEB listed in a fastmap pool.  A pool entry pointing
 * outside the device aborts with UBI_BAD_FASTMAP; individual header
 * scan failures are tolerated — the loader notices missing blocks
 * later and looks for replacements.
 */
static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
{
	int i;

	ubi_dbg("Scanning pool size: %d", pool_size);

	for (i = 0; i < pool_size; i++) {
		u32 pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
			return UBI_BAD_FASTMAP;
		}

		/* Failure here is non-fatal, see comment above */
		ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
	}

	return 0;
}
273*4882a593Smuzhiyun
/*
 * Fastmap code is stolen from Linux kernel and this stub structure is used
 * to make it happy.
 */
struct ubi_attach_info {
	int i;	/* placeholder member; the struct is never dereferenced here */
};
281*4882a593Smuzhiyun
/**
 * ubi_attach_fastmap - parse an in-memory fastmap and populate volume info.
 * @ubi:	UBI scan information (fastmap data already read into fm_buf)
 * @ai:		stub attach info (unused, kept for kernel-code compatibility)
 * @fm:		fastmap layout to fill in (pool sizes)
 *
 * Walks the raw fastmap image in @ubi->fm_buf: super block, header, the
 * two scan pools, the free/used/scrub/erase EC lists, and per-volume EBA
 * tables.  Used PEBs of static volumes we want to load are handed to
 * assign_aeb_to_av(), then both pools are scanned.  Every size/magic
 * check failure bails out with UBI_BAD_FASTMAP.
 */
static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	void *fm_raw = ubi->fm_buf;

	memset(ubi->fm_used, 0, sizeof(ubi->fm_used));

	/* Skip the super block; it was validated by the caller */
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	/* First pool: the regular fastmap pool */
	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	/* Second pool: the wear-leveling pool */
	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	/* Sanity-check all four pool sizes against UBI_FM_MAX_POOL_SIZE */
	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list (skipped, only bounds-checked) */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		/* Remember which PEBs are in use for the EBA pass below */
		generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
	}

	/* read EC values from scrub list (skipped, only bounds-checked) */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from erase list (skipped, only bounds-checked) */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		u32 vol_id, vol_type, used, reserved;

		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		vol_id = be32_to_cpu(fmvhdr->vol_id);
		vol_type = fmvhdr->vol_type;
		used = be32_to_cpu(fmvhdr->used_ebs);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		reserved = be32_to_cpu(fm_eba->reserved_pebs);
		ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);
		for (j = 0; j < reserved; j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			/* Negative entries mean "no PEB mapped" */
			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			/* Skip PEBs not on the used list (side effect:
			 * clears the bit so leaks could be detected) */
			if (!__test_and_clear_bit(pnum, ubi->fm_used))
				continue;

			/*
			 * We only handle static volumes so used_ebs
			 * needs to be handed in. And we do not assign
			 * the reserved blocks
			 */
			if (j >= used)
				continue;

			ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
					       vol_type, used);
			if (!ret)
				continue;

			/*
			 * Nasty: The fastmap claims that the volume
			 * has one block more than it, but that block
			 * is always empty and the other blocks have
			 * the correct number of total LEBs in the
			 * headers. Deal with it.
			 */
			if (ret != UBI_IO_FF && j != used - 1)
				goto fail_bad;
			ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
				vol_id, j, used);
		}
	}

	/* Finally scan the blocks sitting in both pools */
	ret = scan_pool(ubi, fmpl1->pebs, pool_size);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
	if (ret)
		goto fail;

#ifdef CHECKME
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	return ret;
}
491*4882a593Smuzhiyun
/**
 * ubi_scan_fastmap - read, verify and attach the fastmap.
 * @ubi:	UBI scan information
 * @ai:		stub attach info (unused, kept for kernel-code compatibility)
 * @fm_anchor:	PEB number of the fastmap anchor (super block)
 *
 * Reads the fastmap super block from @fm_anchor, validates magic,
 * version and size, reads all fastmap data blocks into @ubi->fm_buf,
 * checks the overall data CRC and finally hands the buffer to
 * ubi_attach_fastmap().  Returns 0 on success, UBI_BAD_FASTMAP or a
 * negative error code on failure (the caller then falls back to a
 * full scan).
 */
static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
			    struct ubi_attach_info *ai,
			    int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fmsb = &ubi->fm_sb;
	fm = &ubi->fm_layout;

	/* Read the fastmap super block from the anchor PEB */
	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	vh = &ubi->fm_vh;

	/* Read and verify every fastmap data block */
	for (i = 0; i < used_blocks; i++) {
		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

#ifdef LATER
		int image_seq;
		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}
#endif
		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		/*
		 * Mainline code rescans the anchor header. We've done
		 * that already so we merely copy it over.
		 *
		 * NOTE(review): the copy length is sizeof(*fm), i.e. the
		 * size of struct ubi_fastmap_layout, although both source
		 * and destination are struct ubi_vid_hdr — this looks
		 * like it should be sizeof(*vh); confirm against the
		 * struct sizes before changing.
		 */
		if (pnum == fm_anchor)
			memcpy(vh, ubi->blockinfo + pnum, sizeof(*fm));

		/* Block 0 must be the anchor, the rest data blocks */
		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		/* Track the highest sequence number seen */
		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		/* Append the block's payload to the fastmap buffer */
		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	/*
	 * Verify the CRC over the whole fastmap image; the stored CRC
	 * field must be zeroed before recomputing.
	 */
	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap %uMB %u blocks",
		ubi->fsize_mb, ubi->peb_count);
	ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);

out:
	if (ret)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

	/*
	 * Nothing to free in the SPL variant (all buffers are static);
	 * the labels are kept to mirror the kernel code structure.
	 */
free_hdr:
free_fm_sb:
	goto out;
}
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun /*
665*4882a593Smuzhiyun * Scan the flash and attempt to attach via fastmap
666*4882a593Smuzhiyun */
ipl_scan(struct ubi_scan_info * ubi)667*4882a593Smuzhiyun static void ipl_scan(struct ubi_scan_info *ubi)
668*4882a593Smuzhiyun {
669*4882a593Smuzhiyun unsigned int pnum;
670*4882a593Smuzhiyun int res;
671*4882a593Smuzhiyun
672*4882a593Smuzhiyun /*
673*4882a593Smuzhiyun * Scan first for the fastmap super block
674*4882a593Smuzhiyun */
675*4882a593Smuzhiyun for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
676*4882a593Smuzhiyun res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
677*4882a593Smuzhiyun /*
678*4882a593Smuzhiyun * We ignore errors here as we are meriliy scanning
679*4882a593Smuzhiyun * the headers.
680*4882a593Smuzhiyun */
681*4882a593Smuzhiyun if (res != UBI_FASTMAP_ANCHOR)
682*4882a593Smuzhiyun continue;
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun /*
685*4882a593Smuzhiyun * If fastmap is disabled, continue scanning. This
686*4882a593Smuzhiyun * might happen because the previous attempt failed or
687*4882a593Smuzhiyun * the caller disabled it right away.
688*4882a593Smuzhiyun */
689*4882a593Smuzhiyun if (!ubi->fm_enabled)
690*4882a593Smuzhiyun continue;
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun /*
693*4882a593Smuzhiyun * Try to attach the fastmap, if that fails continue
694*4882a593Smuzhiyun * scanning.
695*4882a593Smuzhiyun */
696*4882a593Smuzhiyun if (!ubi_scan_fastmap(ubi, NULL, pnum))
697*4882a593Smuzhiyun return;
698*4882a593Smuzhiyun /*
699*4882a593Smuzhiyun * Fastmap failed. Clear everything we have and start
700*4882a593Smuzhiyun * over. We are paranoid and do not trust anything.
701*4882a593Smuzhiyun */
702*4882a593Smuzhiyun memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
703*4882a593Smuzhiyun pnum = 0;
704*4882a593Smuzhiyun break;
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun
707*4882a593Smuzhiyun /*
708*4882a593Smuzhiyun * Continue scanning, ignore errors, we might find what we are
709*4882a593Smuzhiyun * looking for,
710*4882a593Smuzhiyun */
711*4882a593Smuzhiyun for (; pnum < ubi->peb_count; pnum++)
712*4882a593Smuzhiyun ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
713*4882a593Smuzhiyun }
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun /*
716*4882a593Smuzhiyun * Load a logical block of a volume into memory
717*4882a593Smuzhiyun */
ubi_load_block(struct ubi_scan_info * ubi,uint8_t * laddr,struct ubi_vol_info * vi,u32 vol_id,u32 lnum,u32 last)718*4882a593Smuzhiyun static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
719*4882a593Smuzhiyun struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
720*4882a593Smuzhiyun u32 last)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun struct ubi_vid_hdr *vh, *vrepl;
723*4882a593Smuzhiyun u32 pnum, crc, dlen;
724*4882a593Smuzhiyun
725*4882a593Smuzhiyun retry:
726*4882a593Smuzhiyun /*
727*4882a593Smuzhiyun * If this is a fastmap run, we try to rescan full, otherwise
728*4882a593Smuzhiyun * we simply give up.
729*4882a593Smuzhiyun */
730*4882a593Smuzhiyun if (!test_bit(lnum, vi->found)) {
731*4882a593Smuzhiyun ubi_warn("LEB %d of %d is missing", lnum, last);
732*4882a593Smuzhiyun return -EINVAL;
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun pnum = vi->lebs_to_pebs[lnum];
736*4882a593Smuzhiyun
737*4882a593Smuzhiyun ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun if (ubi_io_is_bad(ubi, pnum)) {
740*4882a593Smuzhiyun ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum);
741*4882a593Smuzhiyun return -EINVAL;
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun if (test_bit(pnum, ubi->corrupt))
745*4882a593Smuzhiyun goto find_other;
746*4882a593Smuzhiyun
747*4882a593Smuzhiyun /*
748*4882a593Smuzhiyun * Lets try to read that block
749*4882a593Smuzhiyun */
750*4882a593Smuzhiyun vh = ubi->blockinfo + pnum;
751*4882a593Smuzhiyun
752*4882a593Smuzhiyun if (!test_bit(pnum, ubi->scanned)) {
753*4882a593Smuzhiyun ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
754*4882a593Smuzhiyun lnum, pnum);
755*4882a593Smuzhiyun if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
756*4882a593Smuzhiyun goto find_other;
757*4882a593Smuzhiyun }
758*4882a593Smuzhiyun
759*4882a593Smuzhiyun /*
760*4882a593Smuzhiyun * Check, if the total number of blocks is correct
761*4882a593Smuzhiyun */
762*4882a593Smuzhiyun if (be32_to_cpu(vh->used_ebs) != last) {
763*4882a593Smuzhiyun ubi_dbg("Block count missmatch.");
764*4882a593Smuzhiyun ubi_dbg("vh->used_ebs: %d nrblocks: %d",
765*4882a593Smuzhiyun be32_to_cpu(vh->used_ebs), last);
766*4882a593Smuzhiyun generic_set_bit(pnum, ubi->corrupt);
767*4882a593Smuzhiyun goto find_other;
768*4882a593Smuzhiyun }
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun /*
771*4882a593Smuzhiyun * Get the data length of this block.
772*4882a593Smuzhiyun */
773*4882a593Smuzhiyun dlen = be32_to_cpu(vh->data_size);
774*4882a593Smuzhiyun
775*4882a593Smuzhiyun /*
776*4882a593Smuzhiyun * Read the data into RAM. We ignore the return value
777*4882a593Smuzhiyun * here as the only thing which might go wrong are
778*4882a593Smuzhiyun * bitflips. Try nevertheless.
779*4882a593Smuzhiyun */
780*4882a593Smuzhiyun ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun /* Calculate CRC over the data */
783*4882a593Smuzhiyun crc = crc32(UBI_CRC32_INIT, laddr, dlen);
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun if (crc != be32_to_cpu(vh->data_crc)) {
786*4882a593Smuzhiyun ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
787*4882a593Smuzhiyun lnum, pnum);
788*4882a593Smuzhiyun generic_set_bit(pnum, ubi->corrupt);
789*4882a593Smuzhiyun goto find_other;
790*4882a593Smuzhiyun }
791*4882a593Smuzhiyun
792*4882a593Smuzhiyun /* We are good. Return the data length we read */
793*4882a593Smuzhiyun return dlen;
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun find_other:
796*4882a593Smuzhiyun ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
797*4882a593Smuzhiyun generic_clear_bit(lnum, vi->found);
798*4882a593Smuzhiyun vrepl = NULL;
799*4882a593Smuzhiyun
800*4882a593Smuzhiyun for (pnum = 0; pnum < ubi->peb_count; pnum++) {
801*4882a593Smuzhiyun struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
802*4882a593Smuzhiyun u32 t_vol_id = be32_to_cpu(tmp->vol_id);
803*4882a593Smuzhiyun u32 t_lnum = be32_to_cpu(tmp->lnum);
804*4882a593Smuzhiyun
805*4882a593Smuzhiyun if (test_bit(pnum, ubi->corrupt))
806*4882a593Smuzhiyun continue;
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun if (t_vol_id != vol_id || t_lnum != lnum)
809*4882a593Smuzhiyun continue;
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun if (!test_bit(pnum, ubi->scanned)) {
812*4882a593Smuzhiyun ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
813*4882a593Smuzhiyun vol_id, lnum, pnum);
814*4882a593Smuzhiyun if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
815*4882a593Smuzhiyun continue;
816*4882a593Smuzhiyun }
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun /*
819*4882a593Smuzhiyun * We found one. If its the first, assign it otherwise
820*4882a593Smuzhiyun * compare the sqnum
821*4882a593Smuzhiyun */
822*4882a593Smuzhiyun generic_set_bit(lnum, vi->found);
823*4882a593Smuzhiyun
824*4882a593Smuzhiyun if (!vrepl) {
825*4882a593Smuzhiyun vrepl = tmp;
826*4882a593Smuzhiyun continue;
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
830*4882a593Smuzhiyun vrepl = tmp;
831*4882a593Smuzhiyun }
832*4882a593Smuzhiyun
833*4882a593Smuzhiyun if (vrepl) {
834*4882a593Smuzhiyun /* Update the vi table */
835*4882a593Smuzhiyun pnum = vrepl - ubi->blockinfo;
836*4882a593Smuzhiyun vi->lebs_to_pebs[lnum] = pnum;
837*4882a593Smuzhiyun ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
838*4882a593Smuzhiyun vh = vrepl;
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun goto retry;
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun /*
844*4882a593Smuzhiyun * Load a volume into RAM
845*4882a593Smuzhiyun */
ipl_load(struct ubi_scan_info * ubi,const u32 vol_id,uint8_t * laddr)846*4882a593Smuzhiyun static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
847*4882a593Smuzhiyun {
848*4882a593Smuzhiyun struct ubi_vol_info *vi;
849*4882a593Smuzhiyun u32 lnum, last, len;
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun if (vol_id >= UBI_SPL_VOL_IDS)
852*4882a593Smuzhiyun return -EINVAL;
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun len = 0;
855*4882a593Smuzhiyun vi = ubi->volinfo + vol_id;
856*4882a593Smuzhiyun last = vi->last_block + 1;
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun /* Read the blocks to RAM, check CRC */
859*4882a593Smuzhiyun for (lnum = 0 ; lnum < last; lnum++) {
860*4882a593Smuzhiyun int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun if (res < 0) {
863*4882a593Smuzhiyun ubi_warn("Failed to load volume %u", vol_id);
864*4882a593Smuzhiyun return res;
865*4882a593Smuzhiyun }
866*4882a593Smuzhiyun /* res is the data length of the read block */
867*4882a593Smuzhiyun laddr += res;
868*4882a593Smuzhiyun len += res;
869*4882a593Smuzhiyun }
870*4882a593Smuzhiyun return len;
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun
ubispl_load_volumes(struct ubispl_info * info,struct ubispl_load * lvols,int nrvols)873*4882a593Smuzhiyun int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
874*4882a593Smuzhiyun int nrvols)
875*4882a593Smuzhiyun {
876*4882a593Smuzhiyun struct ubi_scan_info *ubi = info->ubi;
877*4882a593Smuzhiyun int res, i, fastmap = info->fastmap;
878*4882a593Smuzhiyun u32 fsize;
879*4882a593Smuzhiyun
880*4882a593Smuzhiyun retry:
881*4882a593Smuzhiyun /*
882*4882a593Smuzhiyun * We do a partial initializiation of @ubi. Cleaning fm_buf is
883*4882a593Smuzhiyun * not necessary.
884*4882a593Smuzhiyun */
885*4882a593Smuzhiyun memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun ubi->read = info->read;
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun /* Precalculate the offsets */
890*4882a593Smuzhiyun ubi->vid_offset = info->vid_offset;
891*4882a593Smuzhiyun ubi->leb_start = info->leb_start;
892*4882a593Smuzhiyun ubi->leb_size = info->peb_size - ubi->leb_start;
893*4882a593Smuzhiyun ubi->peb_count = info->peb_count;
894*4882a593Smuzhiyun ubi->peb_offset = info->peb_offset;
895*4882a593Smuzhiyun
896*4882a593Smuzhiyun fsize = info->peb_size * info->peb_count;
897*4882a593Smuzhiyun ubi->fsize_mb = fsize >> 20;
898*4882a593Smuzhiyun
899*4882a593Smuzhiyun /* Fastmap init */
900*4882a593Smuzhiyun ubi->fm_size = ubi_calc_fm_size(ubi);
901*4882a593Smuzhiyun ubi->fm_enabled = fastmap;
902*4882a593Smuzhiyun
903*4882a593Smuzhiyun for (i = 0; i < nrvols; i++) {
904*4882a593Smuzhiyun struct ubispl_load *lv = lvols + i;
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun generic_set_bit(lv->vol_id, ubi->toload);
907*4882a593Smuzhiyun }
908*4882a593Smuzhiyun
909*4882a593Smuzhiyun ipl_scan(ubi);
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun for (i = 0; i < nrvols; i++) {
912*4882a593Smuzhiyun struct ubispl_load *lv = lvols + i;
913*4882a593Smuzhiyun
914*4882a593Smuzhiyun ubi_msg("Loading VolId #%d", lv->vol_id);
915*4882a593Smuzhiyun res = ipl_load(ubi, lv->vol_id, lv->load_addr);
916*4882a593Smuzhiyun if (res < 0) {
917*4882a593Smuzhiyun if (fastmap) {
918*4882a593Smuzhiyun fastmap = 0;
919*4882a593Smuzhiyun goto retry;
920*4882a593Smuzhiyun }
921*4882a593Smuzhiyun ubi_warn("Failed");
922*4882a593Smuzhiyun return res;
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun return 0;
926*4882a593Smuzhiyun }
927