// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#include <linux/mtd/mtd.h>
#include <linux/slab.h>

#include "sfc_nand.h"
#include "sfc_nand_mtd.h"

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH

#ifdef BBT_DEBUG
#define BBT_DBG pr_err
#else
#define BBT_DBG(args...)
#endif

struct nanddev_bbt_info {
	u8 pattern[4];
	unsigned int version;
};

static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };

/**
 * nanddev_read_bbt() - Read the BBT (Bad Block Table)
 * @nand: NAND device
 * @block: BBT block address
 * @update: true - read the version and overwrite bbt.cache if it is newer;
 *	false - read the BBT version only
 *
 * Initialize the in-memory BBT.
 *
 * Return: the BBT version (0 if none found) on success, a negative error
 * code otherwise.
 */
static int nanddev_read_bbt(struct snand_mtd_dev *nand, u32 block, bool update)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = snanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = snanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf, *temp_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	u32 bbt_page_num;
	int ret = 0;
	unsigned int version = 0;
	u32 page_addr, i;

	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Round up to whole pages; an even number of pages is preferred. */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
		mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;
	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

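	/*
	 * The on-flash image is the packed block-status bitmap followed by a
	 * trailer holding the "Bbt0" marker and the table version.
	 */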
	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	/* Stage one page (main data + OOB) at a time */
	temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!temp_buf) {
		kfree(data_buf);
		kfree(oob_buf);

		return -ENOMEM;
	}
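	/*
	 * sfc_nand_read_page_raw() returns main data and OOB concatenated in
	 * one buffer; split them into the separate data/oob staging buffers.
	 */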
	page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
	for (i = 0; i < bbt_page_num; i++) {
		ret = sfc_nand_read_page_raw(0, page_addr + i, (u32 *)temp_buf);
		if (ret < 0) {
			pr_err("%s fail %d\n", __func__, ret);
			ret = -EIO;
			kfree(temp_buf);
			goto out;
		}

		memcpy(ops.datbuf + i * mtd->writesize, temp_buf, mtd->writesize);
		memcpy(ops.oobbuf + i * mtd->oobsize, temp_buf + mtd->writesize, mtd->oobsize);
	}
	kfree(temp_buf);

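	/*
	 * A programmed BBT page clears the first OOB byte; only trust the
	 * version field once the "Bbt0" marker matches.
	 */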
	if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4))
		version = bbt_info->version;

	BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version);
	if (update && version > nand->bbt.version) {
		memcpy(nand->bbt.cache, data_buf, nbytes);
		nand->bbt.version = version;
	}

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret < 0 ? -EIO : (int)version;
}

static int nanddev_write_bbt(struct snand_mtd_dev *nand, u32 block)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = snanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = snanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf, *temp_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	u32 bbt_page_num;
	int ret = 0;
	u32 page_addr, i;

	BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Round up to whole pages; an even number of pages is preferred. */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
		mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;

	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

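	/* Assemble the image: packed bitmap, then the marker/version trailer. */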
	memcpy(data_buf, nand->bbt.cache, nbytes);
	memcpy(bbt_info, bbt_pattern, 4);
	bbt_info->version = nand->bbt.version;

	/* Erase the target block before programming the new image */
	ret = sfc_nand_erase_mtd(mtd, block * mtd->erasesize);
	if (ret)
		goto out;

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!temp_buf) {
		kfree(data_buf);
		kfree(oob_buf);

		return -ENOMEM;
	}
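	/*
	 * Program page by page; each raw write takes main data and OOB
	 * concatenated in a single buffer.
	 */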
	page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
	for (i = 0; i < bbt_page_num; i++) {
		memcpy(temp_buf, ops.datbuf + i * mtd->writesize, mtd->writesize);
		memcpy(temp_buf + mtd->writesize, ops.oobbuf + i * mtd->oobsize, mtd->oobsize);

		ret = sfc_nand_prog_page_raw(0, page_addr + i, (u32 *)temp_buf);
		if (ret < 0) {
			pr_err("%s fail %d\n", __func__, ret);
			ret = -EIO;
			kfree(temp_buf);
			goto out;
		}
	}
	kfree(temp_buf);

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret;
}

static int nanddev_bbt_format(struct snand_mtd_dev *nand)
{
	unsigned int nblocks = snanddev_neraseblocks(nand);
	struct mtd_info *mtd = snanddev_to_mtd(nand);
	u32 start_block, block;

	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;

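	/* Build the initial table from the factory bad-block markers. */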
	for (block = 0; block < nblocks; block++) {
		if (sfc_nand_isbad_mtd(mtd, block * mtd->erasesize))
			snanddev_bbt_set_block_status(nand, block,
						      NAND_BBT_BLOCK_FACTORY_BAD);
	}

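	/*
	 * Mark the good blocks in the BBT area at the end of the device WORN
	 * so they stay reserved for the table.
	 */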
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
		if (snanddev_bbt_get_block_status(nand, start_block + block) ==
			NAND_BBT_BLOCK_GOOD)
			snanddev_bbt_set_block_status(nand, start_block + block,
						      NAND_BBT_BLOCK_WORN);
	}

	return 0;
}

static int nanddev_scan_bbt(struct snand_mtd_dev *nand)
{
	unsigned int nblocks = snanddev_neraseblocks(nand);
	u32 start_block, block;
	int ret = 0;

	nand->bbt.version = 0;
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
		nanddev_read_bbt(nand, start_block + block, true);

	nand->bbt.option |= NANDDEV_BBT_SCANNED;
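	/* No valid on-flash table was found: build one and flush it. */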
	if (nand->bbt.version == 0) {
		nanddev_bbt_format(nand);
		ret = snanddev_bbt_update(nand);
		if (ret) {
			nand->bbt.option = 0;
			pr_err("%s fail\n", __func__);
		}
	}

	return ret;
}

#endif

/**
 * snanddev_bbt_init() - Initialize the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Initialize the in-memory BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int snanddev_bbt_init(struct snand_mtd_dev *nand)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = snanddev_neraseblocks(nand);
	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG);

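	/* Each block takes bits_per_block status bits, packed into longs. */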
	nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
				  GFP_KERNEL);
	if (!nand->bbt.cache)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(snanddev_bbt_init);

/**
 * snanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Undoes what has been done in snanddev_bbt_init().
 */
void snanddev_bbt_cleanup(struct snand_mtd_dev *nand)
{
	kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(snanddev_bbt_cleanup);

/**
 * snanddev_bbt_update() - Update the on-flash BBT
 * @nand: nand device
 *
 * Write the in-memory BBT to the flash. This is a no-op unless the on-flash
 * BBT is enabled (CONFIG_MTD_NAND_BBT_USING_FLASH).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int snanddev_bbt_update(struct snand_mtd_dev *nand)
{
#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	if (nand->bbt.cache &&
	    nand->bbt.option & NANDDEV_BBT_USE_FLASH) {
		unsigned int nblocks = snanddev_neraseblocks(nand);
		u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
		int start_block, block;
		u32 min_version, block_des;
		int ret, count = 0;

		start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			ret = snanddev_bbt_get_block_status(nand, start_block + block);
			if (ret == NAND_BBT_BLOCK_FACTORY_BAD) {
				bbt_version[block] = 0xFFFFFFFF;
				continue;
			}
			ret = nanddev_read_bbt(nand, start_block + block,
					       false);
			if (ret < 0)
				bbt_version[block] = 0xFFFFFFFF;
			else if (ret == 0)
				bbt_version[block] = 0;
			else
				bbt_version[block] = ret;
		}
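		/*
		 * Program the new table into the two candidate blocks holding
		 * the lowest versions, keeping two up-to-date copies on flash.
		 */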
get_min_ver:
		min_version = 0xFFFFFFFF;
		block_des = 0;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			if (bbt_version[block] < min_version) {
				min_version = bbt_version[block];
				block_des = start_block + block;
			}
		}

		if (block_des > 0) {
			nand->bbt.version++;
			ret = nanddev_write_bbt(nand, block_des);
			bbt_version[block_des - start_block] = 0xFFFFFFFF;
			if (ret) {
				pr_err("%s blk= %d ret= %d\n", __func__,
				       block_des, ret);
				goto get_min_ver;
			} else {
				count++;
				if (count < 2)
					goto get_min_ver;
				BBT_DBG("%s success\n", __func__);
			}
		} else {
			pr_err("%s failed\n", __func__);

			return -1;
		}
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(snanddev_bbt_update);

/**
 * snanddev_bbt_get_block_status() - Return the status of an eraseblock
 * @nand: nand device
 * @entry: the BBT entry
 *
 * Return: a positive nand_bbt_block_status value, or -%ERANGE if @entry is
 *	   bigger than the BBT size.
 */
int snanddev_bbt_get_block_status(const struct snand_mtd_dev *nand,
				  unsigned int entry)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long status;

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	if (nand->bbt.option & NANDDEV_BBT_USE_FLASH &&
	    !(nand->bbt.option & NANDDEV_BBT_SCANNED))
		nanddev_scan_bbt((struct snand_mtd_dev *)nand);
#endif

	if (entry >= snanddev_neraseblocks(nand))
		return -ERANGE;

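	/* An entry may straddle two longs; stitch the halves back together. */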
	status = pos[0] >> offs;
	if (bits_per_block + offs > BITS_PER_LONG)
		status |= pos[1] << (BITS_PER_LONG - offs);

	return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(snanddev_bbt_get_block_status);

/**
 * snanddev_bbt_set_block_status() - Update the status of an eraseblock in the
 *				     in-memory BBT
 * @nand: nand device
 * @entry: the BBT entry to update
 * @status: the new status
 *
 * Update an entry of the in-memory BBT. If you want to push the updated BBT
 * to the NAND you should call snanddev_bbt_update().
 *
 * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
 *	   size.
 */
int snanddev_bbt_set_block_status(struct snand_mtd_dev *nand,
				  unsigned int entry,
				  enum nand_bbt_block_status status)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long val = status & GENMASK(bits_per_block - 1, 0);

	if (entry >= snanddev_neraseblocks(nand))
		return -ERANGE;

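	/* Clear the old bits in the first word, then write the new value. */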
	if (offs + bits_per_block - 1 > (BITS_PER_LONG - 1))
		pos[0] &= ~GENMASK(BITS_PER_LONG - 1, offs);
	else
		pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
	pos[0] |= val << offs;

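	/* Spill any remaining bits of the entry into the next word. */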
	if (bits_per_block + offs > BITS_PER_LONG) {
		unsigned int rbits = BITS_PER_LONG - offs;

		pos[1] &= ~GENMASK(bits_per_block - rbits - 1, 0);
		pos[1] |= val >> rbits;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snanddev_bbt_set_block_status);