xref: /OK3568_Linux_fs/kernel/drivers/rkflash/sfc_nand_mtd_bbt.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2017 Free Electrons
4  *
5  * Authors:
6  *	Boris Brezillon <boris.brezillon@free-electrons.com>
7  *	Peter Pan <peterpandong@micron.com>
8  */
9 
10 #include <linux/mtd/mtd.h>
11 #include <linux/slab.h>
12 
13 #include "sfc_nand.h"
14 #include "sfc_nand_mtd.h"
15 
16 #ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
17 
/* BBT_DBG compiles away entirely unless BBT_DEBUG is defined. */
#ifdef BBT_DEBUG
#define BBT_DBG pr_err
#else
#define BBT_DBG(args...)
#endif

/*
 * On-flash BBT trailer, stored in the data area immediately after the
 * block-status bitmap: a 4-byte magic pattern ("Bbt0") followed by a
 * monotonically increasing version number used to select the newest of
 * the redundant BBT copies.
 */
struct nanddev_bbt_info {
	u8 pattern[4];
	unsigned int version;
};

/* Magic marker identifying a valid on-flash BBT. */
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
30 
31 /**
32  * nanddev_read_bbt() - Read the BBT (Bad Block Table)
33  * @nand: NAND device
34  * @block: bbt block address
35  * @update: true - get version and overwrite bbt.cache with new version;
36  *	false - get bbt version only;
37  *
38  * Initialize the in-memory BBT.
39  *
40  * Return: 0 in case of success, a negative error code otherwise.
41  */
nanddev_read_bbt(struct snand_mtd_dev * nand,u32 block,bool update)42 static int nanddev_read_bbt(struct snand_mtd_dev *nand, u32 block, bool update)
43 {
44 	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
45 	unsigned int nblocks = snanddev_neraseblocks(nand);
46 	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
47 					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
48 	struct mtd_info *mtd = snanddev_to_mtd(nand);
49 	u8 *data_buf, *oob_buf, *temp_buf;
50 	struct nanddev_bbt_info *bbt_info;
51 	struct mtd_oob_ops ops;
52 	u32 bbt_page_num;
53 	int ret = 0;
54 	unsigned int version = 0;
55 	u32 page_addr, i;
56 
57 	if (!nand->bbt.cache)
58 		return -ENOMEM;
59 
60 	if (block >= nblocks)
61 		return -EINVAL;
62 
63 	/* aligned to page size, and even pages is better */
64 	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
65 		mtd->writesize - 1) >> mtd->writesize_shift;
66 	bbt_page_num = (bbt_page_num + 1) / 2 * 2;
67 	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
68 	if (!data_buf)
69 		return -ENOMEM;
70 	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
71 	if (!oob_buf) {
72 		kfree(data_buf);
73 
74 		return -ENOMEM;
75 	}
76 
77 	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);
78 
79 	memset(&ops, 0, sizeof(struct mtd_oob_ops));
80 	ops.mode = MTD_OPS_PLACE_OOB;
81 	ops.datbuf = data_buf;
82 	ops.len = bbt_page_num * mtd->writesize;
83 	ops.oobbuf = oob_buf;
84 	ops.ooblen = bbt_page_num * mtd->oobsize;
85 	ops.ooboffs = 0;
86 
87 	/* Store one entry for each block */
88 	temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
89 	if (!temp_buf) {
90 		kfree(data_buf);
91 		kfree(oob_buf);
92 
93 		return -ENOMEM;
94 	}
95 	page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
96 	for (i = 0; i < bbt_page_num; i++) {
97 		ret = sfc_nand_read_page_raw(0, page_addr + i, (u32 *)temp_buf);
98 		if (ret < 0) {
99 			pr_err("%s fail %d\n", __func__, ret);
100 			ret = -EIO;
101 			kfree(temp_buf);
102 			goto out;
103 		}
104 
105 		memcpy(ops.datbuf + i * mtd->writesize, temp_buf, mtd->writesize);
106 		memcpy(ops.oobbuf + i * mtd->oobsize, temp_buf + mtd->writesize, mtd->oobsize);
107 	}
108 	kfree(temp_buf);
109 
110 	if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4))
111 		version = bbt_info->version;
112 
113 	BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version);
114 	if (update && version > nand->bbt.version) {
115 		memcpy(nand->bbt.cache, data_buf, nbytes);
116 		nand->bbt.version = version;
117 	}
118 
119 out:
120 	kfree(data_buf);
121 	kfree(oob_buf);
122 
123 	return ret < 0 ? -EIO : (int)version;
124 }
125 
/**
 * nanddev_write_bbt() - Write the in-memory BBT to flash
 * @nand: NAND device
 * @block: destination eraseblock for this BBT copy
 *
 * Serialize the cached block-status bitmap followed by the pattern/version
 * trailer, erase @block, then program it page by page (raw, no ECC).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static int nanddev_write_bbt(struct snand_mtd_dev *nand, u32 block)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = snanddev_neraseblocks(nand);
	/* Size of the status bitmap, rounded up to whole cache words. */
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = snanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf, *temp_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	u32 bbt_page_num;
	int ret = 0;
	u32 page_addr, i;

	BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* aligned to page size, and even pages is better */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
		mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;

	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	/* The trailer (pattern + version) sits right after the bitmap. */
	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memcpy(data_buf, nand->bbt.cache, nbytes);
	memcpy(bbt_info, bbt_pattern, 4);
	bbt_info->version = nand->bbt.version;

	/* Store one entry for each block */
	ret = sfc_nand_erase_mtd(mtd, block * mtd->erasesize);
	if (ret)
		goto out;

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!temp_buf) {
		kfree(data_buf);
		kfree(oob_buf);

		return -ENOMEM;
	}
	page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
	/* Raw page programs: each page is written as data followed by OOB. */
	for (i = 0; i < bbt_page_num; i++) {
		memcpy(temp_buf, ops.datbuf + i * mtd->writesize, mtd->writesize);
		memcpy(temp_buf + mtd->writesize, ops.oobbuf + i * mtd->oobsize, mtd->oobsize);

		ret = sfc_nand_prog_page_raw(0, page_addr + i, (u32 *)temp_buf);
		if (ret < 0) {
			pr_err("%s fail %d\n", __func__, ret);
			ret = -EIO;
			kfree(temp_buf);
			goto out;
		}
	}
	kfree(temp_buf);

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret;
}
208 
nanddev_bbt_format(struct snand_mtd_dev * nand)209 static int nanddev_bbt_format(struct snand_mtd_dev *nand)
210 {
211 	unsigned int nblocks = snanddev_neraseblocks(nand);
212 	struct mtd_info *mtd = snanddev_to_mtd(nand);
213 	u32 start_block, block;
214 
215 	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
216 
217 	for (block = 0; block < nblocks; block++) {
218 		if (sfc_nand_isbad_mtd(mtd, block * mtd->erasesize))
219 			snanddev_bbt_set_block_status(nand, block,
220 						      NAND_BBT_BLOCK_FACTORY_BAD);
221 	}
222 
223 	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
224 		if (snanddev_bbt_get_block_status(nand, start_block + block) ==
225 			NAND_BBT_BLOCK_GOOD)
226 			snanddev_bbt_set_block_status(nand, start_block + block,
227 						      NAND_BBT_BLOCK_WORN);
228 	}
229 
230 	return 0;
231 }
232 
nanddev_scan_bbt(struct snand_mtd_dev * nand)233 static int nanddev_scan_bbt(struct snand_mtd_dev *nand)
234 {
235 	unsigned int nblocks = snanddev_neraseblocks(nand);
236 	u32 start_block, block;
237 	int ret = 0;
238 
239 	nand->bbt.version = 0;
240 	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
241 	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
242 		nanddev_read_bbt(nand, start_block + block, true);
243 
244 	nand->bbt.option |= NANDDEV_BBT_SCANNED;
245 	if (nand->bbt.version == 0) {
246 		nanddev_bbt_format(nand);
247 		ret = snanddev_bbt_update(nand);
248 		if (ret) {
249 			nand->bbt.option = 0;
250 			pr_err("%s fail\n", __func__);
251 		}
252 	}
253 
254 	return ret;
255 }
256 
257 #endif
258 
259 /**
260  * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
261  * @nand: NAND device
262  *
263  * Initialize the in-memory BBT.
264  *
265  * Return: 0 in case of success, a negative error code otherwise.
266  */
snanddev_bbt_init(struct snand_mtd_dev * nand)267 int snanddev_bbt_init(struct snand_mtd_dev *nand)
268 {
269 	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
270 	unsigned int nblocks = snanddev_neraseblocks(nand);
271 	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
272 					   BITS_PER_LONG);
273 
274 	nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
275 				  GFP_KERNEL);
276 	if (!nand->bbt.cache)
277 		return -ENOMEM;
278 
279 	return 0;
280 }
281 EXPORT_SYMBOL_GPL(snanddev_bbt_init);
282 
283 /**
284  * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
285  * @nand: NAND device
286  *
287  * Undoes what has been done in nanddev_bbt_init()
288  */
snanddev_bbt_cleanup(struct snand_mtd_dev * nand)289 void snanddev_bbt_cleanup(struct snand_mtd_dev *nand)
290 {
291 	kfree(nand->bbt.cache);
292 }
293 EXPORT_SYMBOL_GPL(snanddev_bbt_cleanup);
294 
295 /**
296  * nanddev_bbt_update() - Update a BBT
297  * @nand: nand device
298  *
299  * Update the BBT. Currently a NOP function since on-flash bbt is not yet
300  * supported.
301  *
302  * Return: 0 in case of success, a negative error code otherwise.
303  */
snanddev_bbt_update(struct snand_mtd_dev * nand)304 int snanddev_bbt_update(struct snand_mtd_dev *nand)
305 {
306 #ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
307 	if (nand->bbt.cache &&
308 	    nand->bbt.option & NANDDEV_BBT_USE_FLASH) {
309 		unsigned int nblocks = snanddev_neraseblocks(nand);
310 		u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
311 		int start_block, block;
312 		u32 min_version, block_des;
313 		int ret, count = 0;
314 
315 		start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
316 		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
317 			ret = snanddev_bbt_get_block_status(nand, start_block + block);
318 			if (ret == NAND_BBT_BLOCK_FACTORY_BAD) {
319 				bbt_version[block] = 0xFFFFFFFF;
320 				continue;
321 			}
322 			ret = nanddev_read_bbt(nand, start_block + block,
323 					       false);
324 			if (ret < 0)
325 				bbt_version[block] = 0xFFFFFFFF;
326 			else if (ret == 0)
327 				bbt_version[block] = 0;
328 			else
329 				bbt_version[block] = ret;
330 		}
331 get_min_ver:
332 		min_version = 0xFFFFFFFF;
333 		block_des = 0;
334 		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
335 			if (bbt_version[block] < min_version) {
336 				min_version = bbt_version[block];
337 				block_des = start_block + block;
338 			}
339 		}
340 
341 		if (block_des > 0) {
342 			nand->bbt.version++;
343 			ret = nanddev_write_bbt(nand, block_des);
344 			bbt_version[block_des - start_block] = 0xFFFFFFFF;
345 			if (ret) {
346 				pr_err("%s blk= %d ret= %d\n", __func__,
347 				       block_des, ret);
348 				goto get_min_ver;
349 			} else {
350 				count++;
351 				if (count < 2)
352 					goto get_min_ver;
353 				BBT_DBG("%s success\n", __func__);
354 			}
355 		} else {
356 			pr_err("%s failed\n", __func__);
357 
358 			return -1;
359 		}
360 	}
361 #endif
362 	return 0;
363 }
364 EXPORT_SYMBOL_GPL(snanddev_bbt_update);
365 
366 /**
367  * nanddev_bbt_get_block_status() - Return the status of an eraseblock
368  * @nand: nand device
369  * @entry: the BBT entry
370  *
371  * Return: a positive number nand_bbt_block_status status or -%ERANGE if @entry
372  *	   is bigger than the BBT size.
373  */
snanddev_bbt_get_block_status(const struct snand_mtd_dev * nand,unsigned int entry)374 int snanddev_bbt_get_block_status(const struct snand_mtd_dev *nand,
375 				  unsigned int entry)
376 {
377 	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
378 	unsigned long *pos = nand->bbt.cache +
379 			     ((entry * bits_per_block) / BITS_PER_LONG);
380 	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
381 	unsigned long status;
382 
383 #ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
384 	if (nand->bbt.option & NANDDEV_BBT_USE_FLASH &&
385 	    !(nand->bbt.option & NANDDEV_BBT_SCANNED))
386 		nanddev_scan_bbt((struct snand_mtd_dev *)nand);
387 #endif
388 
389 	if (entry >= snanddev_neraseblocks(nand))
390 		return -ERANGE;
391 
392 	status = pos[0] >> offs;
393 	if (bits_per_block + offs > BITS_PER_LONG)
394 		status |= pos[1] << (BITS_PER_LONG - offs);
395 
396 	return status & GENMASK(bits_per_block - 1, 0);
397 }
398 EXPORT_SYMBOL_GPL(snanddev_bbt_get_block_status);
399 
400 /**
401  * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
402  *				    in-memory BBT
403  * @nand: nand device
404  * @entry: the BBT entry to update
405  * @status: the new status
406  *
407  * Update an entry of the in-memory BBT. If you want to push the updated BBT
408  * the NAND you should call nanddev_bbt_update().
409  *
410  * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
411  *	   size.
412  */
snanddev_bbt_set_block_status(struct snand_mtd_dev * nand,unsigned int entry,enum nand_bbt_block_status status)413 int snanddev_bbt_set_block_status(struct snand_mtd_dev *nand,
414 				  unsigned int entry,
415 				  enum nand_bbt_block_status status)
416 {
417 	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
418 	unsigned long *pos = nand->bbt.cache +
419 			     ((entry * bits_per_block) / BITS_PER_LONG);
420 	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
421 	unsigned long val = status & GENMASK(bits_per_block - 1, 0);
422 
423 	if (entry >= snanddev_neraseblocks(nand))
424 		return -ERANGE;
425 
426 	if (offs + bits_per_block - 1 > (BITS_PER_LONG - 1))
427 		pos[0] &= ~GENMASK(BITS_PER_LONG - 1, offs);
428 	else
429 		pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
430 	pos[0] |= val << offs;
431 
432 	if (bits_per_block + offs > BITS_PER_LONG) {
433 		unsigned int rbits = BITS_PER_LONG - offs;
434 
435 		pos[1] &= ~GENMASK(bits_per_block - rbits - 1, 0);
436 		pos[1] |= val >> rbits;
437 	}
438 
439 	return 0;
440 }
441 EXPORT_SYMBOL_GPL(snanddev_bbt_set_block_status);
442