// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
 */

#include <linux/mtd/bbt_store.h>
#include <linux/slab.h>

#ifdef BBT_DEBUG
#define BBT_DBG pr_err
#else
#define BBT_DBG(args...)
#endif
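/* BBT_DBG() only produces output when this file is built with BBT_DEBUG defined. */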

struct nanddev_bbt_info {
	u8 pattern[4];
	unsigned int version;
};

static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };

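/*
 * On-flash layout of a stored BBT, as produced and consumed by the helpers
 * below: the data area of the selected block starts with the raw bbt.cache
 * bytes, immediately followed by a struct nanddev_bbt_info ("Bbt0" pattern
 * plus version).  A programmed (non-0xff) first OOB byte together with a
 * matching pattern marks a valid copy.
 */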
static int nanddev_read_bbt(struct nand_device *nand, u32 block, bool update)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	int bbt_page_num;
	int ret = 0;
	unsigned int version = 0;

	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Align to the page size; an even number of pages is preferred */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
			mtd->writesize - 1) >> (ffs(mtd->writesize) - 1);
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;
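	/*
	 * Illustrative sizing only (the page size is an example, not a
	 * requirement): on a device whose table plus the 8-byte bbt_info
	 * header fits in a single 2 KiB page, bbt_page_num starts at 1 and
	 * the even rounding above reserves 2 pages.
	 */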
	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	/* Read the candidate BBT pages and their OOB areas in one request */
	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	ret = mtd_read_oob(mtd, block * mtd->erasesize, &ops);
	if (ret && ret != -EUCLEAN) {
		pr_err("%s fail %d\n", __func__, ret);
		ret = -EIO;
		goto out;
	}
	/* Corrected bitflips (-EUCLEAN) are not fatal here */
	ret = 0;

	/* A programmed OOB marker plus a matching pattern means a valid copy */
	if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4))
		version = bbt_info->version;

	BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version);
	/* Only adopt this copy into the in-RAM table if it is newer */
	if (update && version > nand->bbt.version) {
		memcpy(nand->bbt.cache, data_buf, nbytes);
		nand->bbt.version = version;
	}

out:
	kfree(oob_buf);
	kfree(data_buf);

	/* < 0: I/O error, 0: no valid BBT in this block, > 0: stored version */
	return ret < 0 ? -EIO : version;
}

static int nanddev_write_bbt(struct nand_device *nand, u32 block)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	int bbt_page_num;
	int ret = 0;
	struct nand_pos pos;

	BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Align to the page size; an even number of pages is preferred */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
			mtd->writesize - 1) >> (ffs(mtd->writesize) - 1);
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;

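	/*
	 * The layout written below (bbt.cache bytes followed by the
	 * nanddev_bbt_info header) must match what nanddev_read_bbt()
	 * expects; the sizing mirrors the read path, see the example there.
	 */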
	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	/* Serialize the in-RAM table and stamp it with the pattern and version */
	memcpy(data_buf, nand->bbt.cache, nbytes);
	memcpy(bbt_info, bbt_pattern, 4);
	bbt_info->version = nand->bbt.version;

	/* Erase the target block before programming the new copy */
	nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
	ret = nand->ops->erase(nand, &pos);
	if (ret)
		goto out;

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;
	ret = mtd_write_oob(mtd, block * mtd->erasesize, &ops);

out:
	kfree(oob_buf);
	kfree(data_buf);

	return ret;
}

static int nanddev_bbt_format(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_pos pos;
	u32 start_block, block;

	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;

	/* Build the initial table from the factory bad block markers */
	for (block = 0; block < nblocks; block++) {
		nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
		if (nanddev_isbad(nand, &pos))
			nanddev_bbt_set_block_status(nand, block,
						     NAND_BBT_BLOCK_FACTORY_BAD);
	}

	/*
	 * Mark the good blocks in the BBT area (the last
	 * NANDDEV_BBT_SCAN_MAXBLOCKS blocks) as worn so they are kept out of
	 * normal use.
	 */
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
		if (nanddev_bbt_get_block_status(nand, start_block + block) ==
		    NAND_BBT_BLOCK_GOOD)
			nanddev_bbt_set_block_status(nand, start_block + block,
						     NAND_BBT_BLOCK_WORN);
	}

	return 0;
}

/**
 * nanddev_scan_bbt_in_flash() - Scan for a BBT in the flash
 * @nand: nand device
 *
 * Scan the flash for a stored BBT; if none is found, format one and write it
 * back.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_scan_bbt_in_flash(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	u32 start_block, block;
	int ret = 0;

	/* Adopt the newest valid copy found in the BBT area into the cache */
	nand->bbt.version = 0;
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
		nanddev_read_bbt(nand, start_block + block, true);

	/* No valid copy found: build one from factory markers and store it */
	if (nand->bbt.version == 0) {
		nanddev_bbt_format(nand);
		ret = nanddev_bbt_in_flash_update(nand);
		if (ret) {
			nand->bbt.option = 0;
			pr_err("%s fail\n", __func__);
		}
	}

	nand->bbt.option |= NANDDEV_BBT_SCANNED;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_scan_bbt_in_flash);
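
/*
 * Usage sketch (illustrative, not lifted from a real driver): a NAND
 * controller driver is expected to call nanddev_scan_bbt_in_flash() once,
 * after nanddev_init() has set up the in-RAM BBT, and then call
 * nanddev_bbt_in_flash_update() whenever the in-RAM table changes, e.g.
 * after a block has been marked bad.
 */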

/**
 * nanddev_bbt_in_flash_update() - Update the BBT stored in flash
 * @nand: nand device
 *
 * Write the current in-RAM BBT back to flash.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_in_flash_update(struct nand_device *nand)
{
	if (nand->bbt.option & NANDDEV_BBT_SCANNED) {
		unsigned int nblocks = nanddev_neraseblocks(nand);
		u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
		int start_block, block;
		u32 min_version, block_des;
		int ret, count = 0;

		/*
		 * Collect the version stored in each candidate block.
		 * Factory bad and unreadable blocks get 0xFFFFFFFF so that
		 * they are never selected as a write target below.
		 */
		start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			ret = nanddev_bbt_get_block_status(nand, start_block + block);
			if (ret == NAND_BBT_BLOCK_FACTORY_BAD) {
				bbt_version[block] = 0xFFFFFFFF;
				continue;
			}
			ret = nanddev_read_bbt(nand, start_block + block,
					       false);
			if (ret < 0)
				bbt_version[block] = 0xFFFFFFFF;
			else if (ret == 0)
				bbt_version[block] = 0;
			else
				bbt_version[block] = ret;
		}
get_min_ver:
		/* Pick the block holding the oldest copy as the write target */
		min_version = 0xFFFFFFFF;
		block_des = 0;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			if (bbt_version[block] < min_version) {
				min_version = bbt_version[block];
				block_des = start_block + block;
			}
		}

		if (block_des > 0) {
			/*
			 * Overwrite the oldest copy with a fresh one and take
			 * the block out of the candidate list.  On failure,
			 * retry with the next-oldest block; on success, loop
			 * back once so that two blocks end up carrying recent
			 * copies (each write bumps the version).
			 */
			nand->bbt.version++;
			ret = nanddev_write_bbt(nand, block_des);
			bbt_version[block_des - start_block] = 0xFFFFFFFF;
			if (ret) {
				pr_err("%s blk=%d ret=%d\n", __func__,
				       block_des, ret);
				goto get_min_ver;
			} else {
				count++;
				if (count < 2)
					goto get_min_ver;
				BBT_DBG("%s success\n", __func__);
			}
		} else {
			/* No usable block left in the BBT area */
			pr_err("%s failed\n", __func__);

			return -EINVAL;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_in_flash_update);