xref: /rk3399_rockchip-uboot/drivers/mtd/nand/bbt.c (revision cd1c982e9a20e1f221cc1158f81fc40d9d0af0c2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand-bbt: " fmt

#include <linux/mtd/nand.h>
#ifndef __UBOOT__
#include <linux/slab.h>
#endif

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH

#ifdef BBT_DEBUG
#define BBT_DBG pr_err
#else
#define BBT_DBG(args...)
#endif

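/*
 * On-flash BBT layout used by the code below: each BBT copy occupies whole
 * pages in one of the last NANDDEV_BBT_SCAN_MAXBLOCKS eraseblocks and
 * consists of the raw bbt.cache bitmap immediately followed by a
 * struct nanddev_bbt_info holding the "Bbt0" marker and a version counter.
 * The copy carrying the highest version wins when the table is read back.
 */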
struct nanddev_bbt_info {
	u8 pattern[4];
	unsigned int version;
};

static u8 bbt_pattern[] = {'B', 'b', 't', '0' };

/**
 * nanddev_read_bbt() - Read the BBT (Bad Block Table)
 * @nand: NAND device
 * @block: bbt block address
 * @update: true - overwrite bbt.cache if a newer version is found;
 *	false - only retrieve the bbt version;
 *
 * Read one BBT copy from @block and optionally refresh the in-memory BBT.
 *
 * Return: the BBT version found (0 if none), or -EIO on read failure.
 */
static int nanddev_read_bbt(struct nand_device *nand, u32 block, bool update)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	int bbt_page_num;
	int ret = 0;
	unsigned int version = 0;

	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Round the BBT image up to full pages, preferably an even page count */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
		mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;
	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	/* Read the BBT pages and their OOB areas from the given block */
	ret = mtd_read_oob(mtd, block * mtd->erasesize, &ops);
	if (ret && ret != -EUCLEAN) {
		pr_err("%s fail %d\n", __func__, ret);
		ret = -EIO;
		goto out;
	} else {
		ret = 0;
	}

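	/*
	 * A valid BBT copy has a programmed OOB area (first byte != 0xff,
	 * i.e. the block is not erased) and carries the "Bbt0" marker right
	 * after the bitmap data.
	 */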
	if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4))
		version = bbt_info->version;

	BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version);
	if (update && version > nand->bbt.version) {
		memcpy(nand->bbt.cache, data_buf, nbytes);
		nand->bbt.version = version;
	}

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret < 0 ? -EIO : version;
}

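/**
 * nanddev_write_bbt() - Write the in-memory BBT to a flash block
 * @nand: NAND device
 * @block: eraseblock that should hold this BBT copy
 *
 * Erase @block, then program the bbt.cache bitmap followed by the "Bbt0"
 * marker and the current BBT version.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */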
static int nanddev_write_bbt(struct nand_device *nand, u32 block)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	int bbt_page_num;
	int ret = 0;
	struct nand_pos pos;

	BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Round the BBT image up to full pages, preferably an even page count */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
		mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;

	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memcpy(data_buf, nand->bbt.cache, nbytes);
	memcpy(bbt_info, bbt_pattern, 4);
	bbt_info->version = nand->bbt.version;

	/* Erase the target block before programming the new BBT copy */
	nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
	ret = nand->ops->erase(nand, &pos);
	if (ret)
		goto out;

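	/*
	 * oob_buf is left zeroed: programming 0x00 into the OOB area marks
	 * the pages as written, which is what nanddev_read_bbt() checks for
	 * (first OOB byte != 0xff) when looking for a valid copy.
	 */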
	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;
	ret = mtd_write_oob(mtd, block * mtd->erasesize, &ops);

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret;
}

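/**
 * nanddev_bbt_format() - Build a fresh in-memory BBT
 * @nand: NAND device
 *
 * Scan every eraseblock for factory bad-block markers and record them in
 * bbt.cache, then mark the good blocks among the last
 * NANDDEV_BBT_SCAN_MAXBLOCKS blocks as worn so the BBT area is kept out of
 * normal use.
 *
 * Return: always 0.
 */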
static __maybe_unused int nanddev_bbt_format(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_pos pos;
	u32 start_block, block;

	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;

	for (block = 0; block < nblocks; block++) {
		nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
		if (nanddev_isbad(nand, &pos))
			nanddev_bbt_set_block_status(nand, block,
						     NAND_BBT_BLOCK_FACTORY_BAD);
	}

	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
		if (nanddev_bbt_get_block_status(nand, start_block + block) ==
			NAND_BBT_BLOCK_GOOD)
			nanddev_bbt_set_block_status(nand, start_block + block,
						     NAND_BBT_BLOCK_WORN);
	}

	return 0;
}

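/**
 * nanddev_scan_bbt() - Load the BBT from flash, or create it
 * @nand: NAND device
 *
 * Read every BBT copy from the scan area and keep the newest one in
 * bbt.cache. If no valid copy is found (version still 0), build a fresh
 * table with nanddev_bbt_format() and write it back (skipped in SPL builds).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */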
static int nanddev_scan_bbt(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	u32 start_block, block;
	int ret = 0;

	nand->bbt.version = 0;
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
		nanddev_read_bbt(nand, start_block + block, true);

	nand->bbt.option |= NANDDEV_BBT_SCANNED;
#ifndef CONFIG_SPL_BUILD
	if (nand->bbt.version == 0) {
		nanddev_bbt_format(nand);
		ret = nanddev_bbt_update(nand);
		if (ret)
			pr_err("%s fail\n", __func__);
	}
#endif

	return ret;
}
#endif

/**
 * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Initialize the in-memory BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_init(struct nand_device *nand)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG);

	nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
				  GFP_KERNEL);
	if (!nand->bbt.cache)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_init);

/**
 * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Undoes what has been done in nanddev_bbt_init()
 */
void nanddev_bbt_cleanup(struct nand_device *nand)
{
	kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);

/**
 * nanddev_bbt_update() - Update a BBT
 * @nand: nand device
 *
 * Write the in-memory BBT back to flash when the on-flash BBT is enabled
 * (CONFIG_MTD_NAND_BBT_USING_FLASH and NANDDEV_BBT_USE_FLASH); otherwise
 * this is a NOP.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_update(struct nand_device *nand)
{
#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	if (nand->bbt.cache &&
	    nand->bbt.option & NANDDEV_BBT_USE_FLASH) {
		unsigned int nblocks = nanddev_neraseblocks(nand);
		u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
		int start_block, block;
		u32 min_version, block_des;
		int ret, count = 0;

		start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			ret = nanddev_bbt_get_block_status(nand, start_block + block);
			if (ret == NAND_BBT_BLOCK_FACTORY_BAD) {
				bbt_version[block] = 0xFFFFFFFF;
				continue;
			}
			ret = nanddev_read_bbt(nand, start_block + block,
					       false);
			if (ret < 0)
				bbt_version[block] = 0xFFFFFFFF;
			else if (ret == 0)
				bbt_version[block] = 0;
			else
				bbt_version[block] = ret;
		}
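		/*
		 * Overwrite the copies holding the oldest versions first
		 * (0xFFFFFFFF means "skip this block"). Two copies are
		 * written per update; a failed write moves on to the
		 * next-oldest block.
		 */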
get_min_ver:
		min_version = 0xFFFFFFFF;
		block_des = 0;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			if (bbt_version[block] < min_version) {
				min_version = bbt_version[block];
				block_des = start_block + block;
			}
		}

		if (block_des > 0) {
			nand->bbt.version++;
			ret = nanddev_write_bbt(nand, block_des);
			bbt_version[block_des - start_block] = 0xFFFFFFFF;
			if (ret) {
				pr_err("%s blk= %d ret= %d\n", __func__,
				       block_des, ret);
				goto get_min_ver;
			} else {
				count++;
				if (count < 2)
					goto get_min_ver;
				BBT_DBG("%s success\n", __func__);
			}
		} else {
			pr_err("%s failed\n", __func__);

			return -1;
		}
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_update);

/**
 * nanddev_bbt_get_block_status() - Return the status of an eraseblock
 * @nand: nand device
 * @entry: the BBT entry
 *
 * Return: a nand_bbt_block_status value (>= 0), or -%ERANGE if @entry is
 *	   beyond the BBT size.
 */
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long status;

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	/* Lazily load the on-flash BBT the first time a status is queried */
	if (nand->bbt.option & NANDDEV_BBT_USE_FLASH &&
	    !(nand->bbt.option & NANDDEV_BBT_SCANNED))
		nanddev_scan_bbt((struct nand_device *)nand);
#endif

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

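	/*
	 * Entries are bits_per_block wide and packed into unsigned longs, so
	 * a status may straddle a word boundary; pull the remaining bits from
	 * the following word when it does.
	 */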
	status = pos[0] >> offs;
	if (bits_per_block + offs > BITS_PER_LONG)
		status |= pos[1] << (BITS_PER_LONG - offs);

	return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);

/**
 * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
 *				    in-memory BBT
 * @nand: nand device
 * @entry: the BBT entry to update
 * @status: the new status
 *
 * Update an entry of the in-memory BBT. If you want to push the updated BBT
 * to the NAND you should call nanddev_bbt_update().
 *
 * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
 *	   size.
 */
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long val = status & GENMASK(bits_per_block - 1, 0);

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

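	/*
	 * Clear the old status bits in the first word (capped at the word
	 * boundary) and write the new value; if the entry straddles a word
	 * boundary, fix up the spill-over bits in the next word as well.
	 */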
	if (offs + bits_per_block - 1 > (BITS_PER_LONG - 1))
		pos[0] &= ~GENMASK(BITS_PER_LONG - 1, offs);
	else
		pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
	pos[0] |= val << offs;

	if (bits_per_block + offs > BITS_PER_LONG) {
		unsigned int rbits = BITS_PER_LONG - offs;

		pos[1] &= ~GENMASK(bits_per_block - rbits - 1, 0);
		pos[1] |= val >> rbits;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);