/*
 * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
 *
 * The parts taken from the kernel implementation are:
 *
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier:	GPL-2.0+	BSD-3-Clause
 */

#include <common.h>
#include <errno.h>
#include <ubispl.h>

#include <linux/crc32.h>

#include "ubispl.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

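/**
 * ubi_io_read - read data from a physical eraseblock via the board callback.
 * @ubi: UBI device description object
 * @buf: buffer to read into
 * @pnum: physical eraseblock number, relative to the UBI partition
 * @from: offset within the eraseblock
 * @len: number of bytes to read
 */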
static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
		       unsigned long from, unsigned long len)
{
	return ubi->read(pnum + ubi->peb_offset, from, len, buf);
}

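/**
 * ubi_io_is_bad - check whether a physical eraseblock number is out of range.
 * @ubi: UBI device description object
 * @peb: physical eraseblock number
 */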
static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
{
	return peb >= ubi->peb_count || peb < 0;
}

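/**
 * ubi_io_read_vid_hdr - read and validate the VID header of a PEB.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number
 * @vh: buffer for the VID header
 * @unused: retained to match the kernel function signature
 *
 * Results are cached: blocks marked corrupt are rejected without
 * rereading, and blocks which have been scanned already return
 * success immediately.
 */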
static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
			       struct ubi_vid_hdr *vh, int unused)
{
	u32 magic;
	int res;

	/* No point in rescanning a corrupt block */
	if (test_bit(pnum, ubi->corrupt))
		return UBI_IO_BAD_HDR;
	/*
	 * If the block has been scanned already, no need to rescan
	 */
	if (test_and_set_bit(pnum, ubi->scanned))
		return 0;

	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));

	/*
	 * Bad block, unrecoverable ECC error, skip the block
	 */
	if (res) {
		ubi_dbg("Skipping bad or unreadable block %d", pnum);
		vh->magic = 0;
		generic_set_bit(pnum, ubi->corrupt);
		return res;
	}

	/* Magic number available? */
	magic = be32_to_cpu(vh->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		generic_set_bit(pnum, ubi->corrupt);
		if (magic == 0xffffffff)
			return UBI_IO_FF;
		ubi_msg("Bad magic in block %d %08x", pnum, magic);
		return UBI_IO_BAD_HDR;
	}

	/* Header CRC correct? */
	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
	    be32_to_cpu(vh->hdr_crc)) {
		ubi_msg("Bad CRC in block %d", pnum);
		generic_set_bit(pnum, ubi->corrupt);
		return UBI_IO_BAD_HDR;
	}

	ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));

	return 0;
}

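/**
 * ubi_rescan_fm_vid_hdr - reread a VID header and check it against the
 * volume id, volume type and LEB number recorded in the fastmap.
 * @ubi: UBI device description object
 * @vh: buffer for the VID header
 * @fm_pnum: physical eraseblock number to rescan
 * @fm_vol_id: volume id expected by the fastmap
 * @fm_lnum: logical eraseblock number expected by the fastmap
 */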
static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
				 struct ubi_vid_hdr *vh,
				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
{
	int res;

	if (ubi_io_is_bad(ubi, fm_pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
	if (!res) {
		/* Check volume id, volume type and lnum */
		if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
		    vh->vol_type == UBI_VID_STATIC &&
		    be32_to_cpu(vh->lnum) == fm_lnum)
			return 0;
		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
			fm_pnum, fm_vol_id, vh->vol_type,
			be32_to_cpu(vh->vol_id),
			fm_lnum, be32_to_cpu(vh->lnum));
	}
	return res;
}

/* Insert the logical block into the volume info */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/* If the block is already assigned, check sqnum */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block has not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}

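/**
 * ubi_scan_vid_hdr - scan the VID header of a PEB and sort the block in.
 * @ubi: UBI device description object
 * @vh: buffer for the VID header
 * @pnum: physical eraseblock number
 *
 * Returns UBI_FASTMAP_ANCHOR when the block belongs to the fastmap
 * superblock volume and fastmap is enabled. Blocks of static volumes
 * which are marked in @ubi->toload are added to the volume info.
 */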
static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
			    u32 pnum)
{
	u32 vol_id, lnum;
	int res;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
	if (res)
		return res;

	/* Get volume id */
	vol_id = be32_to_cpu(vh->vol_id);

	/* If this is the fastmap anchor, return right away */
	if (vol_id == UBI_FM_SB_VOLUME_ID)
		return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;

	/* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
	if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	lnum = be32_to_cpu(vh->lnum);
	return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
}

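/**
 * assign_aeb_to_av - process a PEB referenced by the fastmap EBA table.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number
 * @lnum: logical eraseblock number according to the fastmap
 * @vol_id: volume id
 * @vol_type: volume type
 * @used: number of used logical eraseblocks in the volume
 *
 * The name is kept from the kernel fastmap code; here the block is
 * simply scanned and added to the volume info if it belongs to a
 * static volume we are asked to load.
 */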
static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
			    u32 vol_id, u32 vol_type, u32 used)
{
	struct ubi_vid_hdr *vh;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	ubi->fastmap_pebs++;

	if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	vh = ubi->blockinfo + pnum;

	return ubi_scan_vid_hdr(ubi, vh, pnum);
}

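/**
 * scan_pool - scan all PEBs of a fastmap pool.
 * @ubi: UBI device description object
 * @pebs: array of physical eraseblock numbers (big endian)
 * @pool_size: number of entries in @pebs
 *
 * Pool blocks may have been modified after the fastmap was written,
 * so their headers have to be scanned the slow way.
 */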
static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
{
	struct ubi_vid_hdr *vh;
	u32 pnum;
	int i;

	ubi_dbg("Scanning pool size: %d", pool_size);

	for (i = 0; i < pool_size; i++) {
		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
			return UBI_BAD_FASTMAP;
		}

		vh = ubi->blockinfo + pnum;
		/*
		 * We allow the scan to fail here. The loader will notice
		 * and look for a replacement.
		 */
		ubi_scan_vid_hdr(ubi, vh, pnum);
	}
	return 0;
}

/*
 * The fastmap code is stolen from the Linux kernel and this stub
 * structure is used to make it happy.
 */
struct ubi_attach_info {
	int i;
};

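/**
 * ubi_attach_fastmap - parse the fastmap data in @ubi->fm_buf and
 * populate the volume info from the EBA tables.
 * @ubi: UBI device description object
 * @ai: stub attach info, unused here
 * @fm: fastmap layout to fill with the pool sizes
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the fastmap is inconsistent.
 */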
static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	void *fm_raw = ubi->fm_buf;

	memset(ubi->fm_used, 0, sizeof(ubi->fm_used));

	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		u32 vol_id, vol_type, used, reserved;

		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		vol_id = be32_to_cpu(fmvhdr->vol_id);
		vol_type = fmvhdr->vol_type;
		used = be32_to_cpu(fmvhdr->used_ebs);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		reserved = be32_to_cpu(fm_eba->reserved_pebs);
		ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);
		for (j = 0; j < reserved; j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			if (!__test_and_clear_bit(pnum, ubi->fm_used))
				continue;

			/*
			 * We only handle static volumes so used_ebs
			 * needs to be handed in. And we do not assign
			 * the reserved blocks.
			 */
			if (j >= used)
				continue;

			ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
					       vol_type, used);
			if (!ret)
				continue;

			/*
			 * Nasty: The fastmap claims that the volume
			 * has one block more than it actually has,
			 * but that block is always empty and the
			 * other blocks have the correct number of
			 * total LEBs in their headers. Deal with it.
			 */
			if (ret != UBI_IO_FF && j != used - 1)
				goto fail_bad;
			ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
				vol_id, j, used);
		}
	}

	ret = scan_pool(ubi, fmpl1->pebs, pool_size);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
	if (ret)
		goto fail;

#ifdef CHECKME
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	return ret;
}

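/**
 * ubi_scan_fastmap - read and validate the fastmap and attach by it.
 * @ubi: UBI device description object
 * @ai: stub attach info, unused here
 * @fm_anchor: PEB number of the fastmap anchor block
 *
 * Reads the fastmap superblock and all fastmap data blocks into
 * @ubi->fm_buf, verifies the data CRC and hands the buffer to
 * ubi_attach_fastmap(). Any failure makes the caller fall back to
 * a full scan.
 */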
static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
			    struct ubi_attach_info *ai,
			    int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fmsb = &ubi->fm_sb;
	fm = &ubi->fm_layout;

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	vh = &ubi->fm_vh;

	for (i = 0; i < used_blocks; i++) {
		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

#ifdef LATER
		int image_seq;
		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}
#endif
		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		/*
		 * Mainline code rescans the anchor header. We've done
		 * that already so we merely copy it over.
		 */
		if (pnum == fm_anchor)
			memcpy(vh, ubi->blockinfo + pnum, sizeof(*vh));

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap %uMB %u blocks",
		ubi->fsize_mb, ubi->peb_count);
	ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);

out:
	if (ret)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
free_fm_sb:
	goto out;
}

/*
 * Scan the flash and attempt to attach via fastmap
 */
static void ipl_scan(struct ubi_scan_info *ubi)
{
	unsigned int pnum;
	int res;

	/*
	 * Scan first for the fastmap super block
	 */
	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
		/*
		 * We ignore errors here as we are merely scanning
		 * the headers.
		 */
		if (res != UBI_FASTMAP_ANCHOR)
			continue;

		/*
		 * If fastmap is disabled, continue scanning. This
		 * might happen because the previous attempt failed or
		 * the caller disabled it right away.
		 */
		if (!ubi->fm_enabled)
			continue;

		/*
		 * Try to attach the fastmap, if that fails continue
		 * scanning.
		 */
		if (!ubi_scan_fastmap(ubi, NULL, pnum))
			return;
		/*
		 * Fastmap failed. Clear everything we have and start
		 * over. We are paranoid and do not trust anything.
		 */
		memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
		pnum = 0;
		break;
	}

	/*
	 * Continue scanning, ignore errors, we might find what we are
	 * looking for.
	 */
	for (; pnum < ubi->peb_count; pnum++)
		ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
}

/*
 * Load a logical block of a volume into memory
 */
static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
			  struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
			  u32 last)
{
	struct ubi_vid_hdr *vh, *vrepl;
	u32 pnum, crc, dlen;

retry:
	/*
	 * If this is a fastmap run, we try a full rescan, otherwise
	 * we simply give up.
	 */
	if (!test_bit(lnum, vi->found)) {
		ubi_warn("LEB %d of %d is missing", lnum, last);
		return -EINVAL;
	}

	pnum = vi->lebs_to_pebs[lnum];

	ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);

	if (ubi_io_is_bad(ubi, pnum)) {
		ubi_warn("Corrupted mapping block %d PB %d", lnum, pnum);
		return -EINVAL;
	}

	if (test_bit(pnum, ubi->corrupt))
		goto find_other;

	/*
	 * Let's try to read that block
	 */
	vh = ubi->blockinfo + pnum;

	if (!test_bit(pnum, ubi->scanned)) {
		ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
			 lnum, pnum);
		if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
			goto find_other;
	}

	/*
	 * Check if the total number of blocks is correct
	 */
	if (be32_to_cpu(vh->used_ebs) != last) {
		ubi_dbg("Block count mismatch.");
		ubi_dbg("vh->used_ebs: %d nrblocks: %d",
			be32_to_cpu(vh->used_ebs), last);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/*
	 * Get the data length of this block.
	 */
	dlen = be32_to_cpu(vh->data_size);

	/*
	 * Read the data into RAM. We ignore the return value
	 * here as the only thing which might go wrong are
	 * bitflips. Try nevertheless.
	 */
	ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);

	/* Calculate CRC over the data */
	crc = crc32(UBI_CRC32_INIT, laddr, dlen);

	if (crc != be32_to_cpu(vh->data_crc)) {
		ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
			 lnum, pnum);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/* We are good. Return the data length we read */
	return dlen;

find_other:
	ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
	generic_clear_bit(lnum, vi->found);
	vrepl = NULL;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
		u32 t_vol_id = be32_to_cpu(tmp->vol_id);
		u32 t_lnum = be32_to_cpu(tmp->lnum);

		if (test_bit(pnum, ubi->corrupt))
			continue;

		if (t_vol_id != vol_id || t_lnum != lnum)
			continue;

		if (!test_bit(pnum, ubi->scanned)) {
			ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
				 vol_id, lnum, pnum);
			if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
				continue;
		}

		/*
		 * We found one. If it's the first, assign it,
		 * otherwise compare the sqnum.
		 */
		generic_set_bit(lnum, vi->found);

		if (!vrepl) {
			vrepl = tmp;
			continue;
		}

		if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
			vrepl = tmp;
	}

	if (vrepl) {
		/* Update the vi table */
		pnum = vrepl - ubi->blockinfo;
		vi->lebs_to_pebs[lnum] = pnum;
		ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
		vh = vrepl;
	}
	goto retry;
}

/*
 * Load a volume into RAM
 */
static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
{
	struct ubi_vol_info *vi;
	u32 lnum, last, len;

	if (vol_id >= UBI_SPL_VOL_IDS)
		return -EINVAL;

	len = 0;
	vi = ubi->volinfo + vol_id;
	last = vi->last_block + 1;

	/* Read the blocks to RAM, check CRC */
	for (lnum = 0; lnum < last; lnum++) {
		int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);

		if (res < 0) {
			ubi_warn("Failed to load volume %u", vol_id);
			return res;
		}
		/* res is the data length of the read block */
		laddr += res;
		len += res;
	}
	return len;
}

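/**
 * ubispl_load_volumes - scan flash and load the requested static volumes.
 * @info: flash geometry, read callback and fastmap setting
 * @lvols: array of volume id / load address pairs
 * @nrvols: number of entries in @lvols
 *
 * Attaches via fastmap when enabled; if loading fails after a fastmap
 * attach, everything is reset and a full scan is retried once.
 */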
int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
			int nrvols)
{
	struct ubi_scan_info *ubi = info->ubi;
	int res, i, fastmap = info->fastmap;
	u32 fsize;

retry:
	/*
	 * We do a partial initialization of @ubi. Cleaning fm_buf is
	 * not necessary.
	 */
	memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));

	ubi->read = info->read;

	/* Precalculate the offsets */
	ubi->vid_offset = info->vid_offset;
	ubi->leb_start = info->leb_start;
	ubi->leb_size = info->peb_size - ubi->leb_start;
	ubi->peb_count = info->peb_count;
	ubi->peb_offset = info->peb_offset;

	fsize = info->peb_size * info->peb_count;
	ubi->fsize_mb = fsize >> 20;

	/* Fastmap init */
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_enabled = fastmap;

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		generic_set_bit(lv->vol_id, ubi->toload);
	}

	ipl_scan(ubi);

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		ubi_msg("Loading VolId #%d", lv->vol_id);
		res = ipl_load(ubi, lv->vol_id, lv->load_addr);
		if (res < 0) {
			if (fastmap) {
				fastmap = 0;
				goto retry;
			}
			ubi_warn("Failed to load volume %u", lv->vol_id);
			return res;
		}
	}
	return 0;
}
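
/*
 * Usage sketch (illustrative, not part of this driver): an SPL board
 * file fills struct ubispl_info with the flash geometry and a read
 * callback, describes the volumes in struct ubispl_load and calls
 * ubispl_load_volumes(). The callback name, geometry values and load
 * address below are hypothetical examples.
 *
 *	static struct ubi_scan_info ubi_scan_info;
 *
 *	struct ubispl_info info = {
 *		.ubi		= &ubi_scan_info,
 *		.peb_size	= 128 * 1024,	(hypothetical geometry)
 *		.vid_offset	= 512,
 *		.leb_start	= 2048,
 *		.peb_count	= 1024,
 *		.peb_offset	= 0,
 *		.fastmap	= 1,
 *		.read		= board_flash_read,	(hypothetical)
 *	};
 *	struct ubispl_load lv = {
 *		.vol_id		= 0,
 *		.load_addr	= (void *)0x82000000,	(hypothetical)
 *	};
 *
 *	if (ubispl_load_volumes(&info, &lv, 1))
 *		panic("Cannot load UBI volume");
 */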