// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand-bbt: " fmt

#include <linux/mtd/nand.h>
#ifndef __UBOOT__
#include <linux/slab.h>
#endif

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH

#ifdef BBT_DEBUG
#define BBT_DBG pr_err
#else
#define BBT_DBG(args...)
#endif

struct nanddev_bbt_info {
	u8 pattern[4];
	unsigned int version;
};

static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };

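/*
 * On-flash layout used below: the packed block-status bitmap from
 * nand->bbt.cache is written to the data area of a BBT block, immediately
 * followed by a struct nanddev_bbt_info carrying the "Bbt0" marker and a
 * monotonically increasing version. The table is kept in the last
 * NANDDEV_BBT_SCAN_MAXBLOCKS eraseblocks of the device.
 */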
/**
 * nanddev_read_bbt() - Read the BBT (Bad Block Table)
 * @nand: NAND device
 * @block: BBT block address
 * @update: true - read the version and, if it is newer, refresh bbt.cache;
 *	    false - read the BBT version only
 *
 * Read one on-flash copy of the BBT and optionally update the in-memory BBT
 * from it.
 *
 * Return: the BBT version found in @block (0 if none), or a negative error
 * code otherwise.
 */
static int nanddev_read_bbt(struct nand_device *nand, u32 block, bool update)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	int bbt_page_num;
	int ret = 0;
	unsigned int version = 0;

	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Round up to the page size; an even number of pages is preferred. */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
			mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;
	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	/* Read the whole BBT area (data and OOB) in one go */
	ret = mtd_read_oob(mtd, block * mtd->erasesize, &ops);
	if (ret && ret != -EUCLEAN) {
		pr_err("%s fail %d\n", __func__, ret);
		ret = -EIO;
		goto out;
	} else {
		ret = 0;
	}

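	/*
	 * A block carries a valid table only if its first OOB byte is not
	 * 0xff (i.e. the page has been programmed) and the "Bbt0" marker
	 * matches; the stored version then decides whether the cache is
	 * refreshed.
	 */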
	if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4))
		version = bbt_info->version;

	BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version);
	if (update && version > nand->bbt.version) {
		memcpy(nand->bbt.cache, data_buf, nbytes);
		nand->bbt.version = version;
	}

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret < 0 ? -EIO : version;
}

static int nanddev_write_bbt(struct nand_device *nand, u32 block)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	int bbt_page_num;
	int ret = 0;
	struct nand_pos pos;

	BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Round up to the page size; an even number of pages is preferred. */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
			mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;

	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

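	/*
	 * Serialize the in-memory table, followed by the "Bbt0" marker and
	 * the current version, into the data buffer.
	 */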
	memcpy(data_buf, nand->bbt.cache, nbytes);
	memcpy(bbt_info, bbt_pattern, 4);
	bbt_info->version = nand->bbt.version;

	/* Erase the target block, then program the table and its OOB area */
	nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
	ret = nand->ops->erase(nand, &pos);
	if (ret)
		goto out;

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;
	ret = mtd_write_oob(mtd, block * mtd->erasesize, &ops);

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret;
}

static __maybe_unused int nanddev_bbt_format(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_pos pos;
	u32 start_block, block;

	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;

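	/* Mirror the factory bad block markers into the in-memory table. */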
	for (block = 0; block < nblocks; block++) {
		nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
		if (nanddev_isbad(nand, &pos))
			nanddev_bbt_set_block_status(nand, block,
						     NAND_BBT_BLOCK_FACTORY_BAD);
	}

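	/*
	 * Reserve the BBT area: good blocks among the last
	 * NANDDEV_BBT_SCAN_MAXBLOCKS ones are marked WORN so they are not
	 * handed out for regular data.
	 */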
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
		if (nanddev_bbt_get_block_status(nand, start_block + block) ==
		    NAND_BBT_BLOCK_GOOD)
			nanddev_bbt_set_block_status(nand, start_block + block,
						     NAND_BBT_BLOCK_WORN);
	}

	return 0;
}

static int nanddev_scan_bbt(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	u32 start_block, block;
	int ret = 0;

	nand->bbt.version = 0;
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
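	/*
	 * Read every candidate block; nanddev_read_bbt() keeps whichever
	 * copy carries the highest version in the cache.
	 */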
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
		nanddev_read_bbt(nand, start_block + block, true);

	nand->bbt.option |= NANDDEV_BBT_SCANNED;
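	/*
	 * If no valid on-flash table was found (version still 0), build one
	 * from the factory markers and flush it; this step is skipped in the
	 * SPL build.
	 */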
#ifndef CONFIG_SPL_BUILD
	if (nand->bbt.version == 0) {
		nanddev_bbt_format(nand);
		ret = nanddev_bbt_update(nand);
		if (ret) {
			nand->bbt.option = 0;
			pr_err("%s fail\n", __func__);
		}
	}
#endif

	return ret;
}
#endif

/**
 * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Initialize the in-memory BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_init(struct nand_device *nand)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG);

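	/*
	 * The cache packs bits_per_block status bits per eraseblock into an
	 * array of unsigned long words.
	 */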
	nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
				  GFP_KERNEL);
	if (!nand->bbt.cache)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_init);

/**
 * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Undoes what has been done in nanddev_bbt_init()
 */
void nanddev_bbt_cleanup(struct nand_device *nand)
{
	kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);

/**
 * nanddev_bbt_update() - Update a BBT
 * @nand: nand device
 *
 * Write the in-memory BBT back to the flash-resident table when
 * CONFIG_MTD_NAND_BBT_USING_FLASH is enabled; otherwise this is a NOP.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_update(struct nand_device *nand)
{
#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	if (nand->bbt.cache &&
	    nand->bbt.option & NANDDEV_BBT_USE_FLASH) {
		unsigned int nblocks = nanddev_neraseblocks(nand);
		u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
		int start_block, block;
		u32 min_version, block_des;
		int ret, count = 0;

		start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
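		/*
		 * Collect the version stored in each candidate block;
		 * factory-bad or unreadable blocks are ruled out by giving
		 * them the highest possible version.
		 */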
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			ret = nanddev_bbt_get_block_status(nand, start_block + block);
			if (ret == NAND_BBT_BLOCK_FACTORY_BAD) {
				bbt_version[block] = 0xFFFFFFFF;
				continue;
			}
			ret = nanddev_read_bbt(nand, start_block + block,
					       false);
			if (ret < 0)
				bbt_version[block] = 0xFFFFFFFF;
			else if (ret == 0)
				bbt_version[block] = 0;
			else
				bbt_version[block] = ret;
		}
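		/*
		 * Write the table into the block holding the oldest (lowest
		 * version) copy, then once more into the next oldest block so
		 * a redundant copy exists; on a write failure fall back to
		 * the next candidate.
		 */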
get_min_ver:
		min_version = 0xFFFFFFFF;
		block_des = 0;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			if (bbt_version[block] < min_version) {
				min_version = bbt_version[block];
				block_des = start_block + block;
			}
		}

		if (block_des > 0) {
			nand->bbt.version++;
			ret = nanddev_write_bbt(nand, block_des);
			bbt_version[block_des - start_block] = 0xFFFFFFFF;
			if (ret) {
				pr_err("%s blk= %d ret= %d\n", __func__,
				       block_des, ret);
				goto get_min_ver;
			} else {
				count++;
				if (count < 2)
					goto get_min_ver;
				BBT_DBG("%s success\n", __func__);
			}
		} else {
			pr_err("%s failed\n", __func__);

			return -1;
		}
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_update);

/**
 * nanddev_bbt_get_block_status() - Return the status of an eraseblock
 * @nand: nand device
 * @entry: the BBT entry
 *
 * Return: a positive nand_bbt_block_status value, or -%ERANGE if @entry is
 * bigger than the BBT size.
 */
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long status;

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	if (nand->bbt.option & NANDDEV_BBT_USE_FLASH &&
	    !(nand->bbt.option & NANDDEV_BBT_SCANNED))
		nanddev_scan_bbt((struct nand_device *)nand);
#endif

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

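	/*
	 * An entry may straddle two words: take the low bits from pos[0]
	 * and, if needed, the remaining bits from pos[1].
	 */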
	status = pos[0] >> offs;
	if (bits_per_block + offs > BITS_PER_LONG)
		status |= pos[1] << (BITS_PER_LONG - offs);

	return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);

/**
 * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
 *				    in-memory BBT
 * @nand: nand device
 * @entry: the BBT entry to update
 * @status: the new status
 *
 * Update an entry of the in-memory BBT. If you want to push the updated BBT
 * to the NAND you should call nanddev_bbt_update().
 *
 * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
 * size.
 */
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long val = status & GENMASK(bits_per_block - 1, 0);

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

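	/*
	 * Clear the old entry in the first word (clamping the mask at the
	 * word boundary), store the new value, and spill any upper bits into
	 * the next word when the entry straddles it.
	 */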
	if (offs + bits_per_block - 1 > (BITS_PER_LONG - 1))
		pos[0] &= ~GENMASK(BITS_PER_LONG - 1, offs);
	else
		pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
	pos[0] |= val << offs;

	if (bits_per_block + offs > BITS_PER_LONG) {
		unsigned int rbits = BITS_PER_LONG - offs;

		pos[1] &= ~GENMASK(bits_per_block - rbits - 1, 0);
		pos[1] |= val >> rbits;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);