1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2017 Free Electrons 4 * 5 * Authors: 6 * Boris Brezillon <boris.brezillon@free-electrons.com> 7 * Peter Pan <peterpandong@micron.com> 8 */ 9 10 #define pr_fmt(fmt) "nand-bbt: " fmt 11 12 #include <linux/mtd/nand.h> 13 #ifndef __UBOOT__ 14 #include <linux/slab.h> 15 #endif 16 17 #ifdef CONFIG_MTD_NAND_BBT_USING_FLASH 18 19 #ifdef BBT_DEBUG 20 #define BBT_DBG pr_err 21 #else 22 #define BBT_DBG(args...) 23 #endif 24 25 struct nanddev_bbt_info { 26 u8 pattern[4]; 27 unsigned int version; 28 }; 29 30 static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; 31 32 /** 33 * nanddev_read_bbt() - Read the BBT (Bad Block Table) 34 * @nand: NAND device 35 * @block: bbt block address 36 * @update: true - get version and overwrite bbt.cache with new version; 37 * false - get bbt version only; 38 * 39 * Initialize the in-memory BBT. 40 * 41 * Return: 0 in case of success, a negative error code otherwise. 42 */ 43 static int nanddev_read_bbt(struct nand_device *nand, u32 block, bool update) 44 { 45 unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS); 46 unsigned int nblocks = nanddev_neraseblocks(nand); 47 unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block, 48 BITS_PER_LONG) * sizeof(*nand->bbt.cache); 49 struct mtd_info *mtd = nanddev_to_mtd(nand); 50 u8 *data_buf, *oob_buf; 51 struct nanddev_bbt_info *bbt_info; 52 struct mtd_oob_ops ops; 53 int bbt_page_num; 54 int ret = 0; 55 unsigned int version = 0; 56 57 if (!nand->bbt.cache) 58 return -ENOMEM; 59 60 if (block >= nblocks) 61 return -EINVAL; 62 63 /* aligned to page size, and even pages is better */ 64 bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes + 65 mtd->writesize - 1) >> mtd->writesize_shift; 66 bbt_page_num = (bbt_page_num + 1) / 2 * 2; 67 data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL); 68 if (!data_buf) 69 return -ENOMEM; 70 oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL); 71 if (!oob_buf) { 72 kfree(data_buf); 73 
74 return -ENOMEM; 75 } 76 77 bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes); 78 79 memset(&ops, 0, sizeof(struct mtd_oob_ops)); 80 ops.mode = MTD_OPS_PLACE_OOB; 81 ops.datbuf = data_buf; 82 ops.len = bbt_page_num * mtd->writesize; 83 ops.oobbuf = oob_buf; 84 ops.ooblen = bbt_page_num * mtd->oobsize; 85 ops.ooboffs = 0; 86 87 /* Store one entry for each block */ 88 ret = mtd_read_oob(mtd, block * mtd->erasesize, &ops); 89 if (ret && ret != -EUCLEAN) { 90 pr_err("%s fail %d\n", __func__, ret); 91 ret = -EIO; 92 goto out; 93 } else { 94 ret = 0; 95 } 96 97 if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4)) 98 version = bbt_info->version; 99 100 BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version); 101 if (update && version > nand->bbt.version) { 102 memcpy(nand->bbt.cache, data_buf, nbytes); 103 nand->bbt.version = version; 104 } 105 106 #ifdef BBT_DEBUG 107 if (version) { 108 u8 *temp_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL); 109 110 memcpy(temp_buf, nand->bbt.cache, nbytes); 111 memcpy(nand->bbt.cache, data_buf, nbytes); 112 113 nand->bbt.option |= NANDDEV_BBT_SCANNED; 114 for (block = 0; block < nblocks; block++) { 115 ret = nanddev_bbt_get_block_status(nand, block); 116 if (ret != NAND_BBT_BLOCK_GOOD) 117 BBT_DBG("bad block[0x%x], ret=%d\n", block, ret); 118 } 119 120 nand->bbt.option &= ~NANDDEV_BBT_SCANNED; 121 memcpy(nand->bbt.cache, temp_buf, nbytes); 122 kfree(temp_buf); 123 } 124 #endif 125 126 out: 127 kfree(data_buf); 128 kfree(oob_buf); 129 130 return ret < 0 ? 
-EIO : version; 131 } 132 133 static int nanddev_write_bbt(struct nand_device *nand, u32 block) 134 { 135 unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS); 136 unsigned int nblocks = nanddev_neraseblocks(nand); 137 unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block, 138 BITS_PER_LONG) * sizeof(*nand->bbt.cache); 139 struct mtd_info *mtd = nanddev_to_mtd(nand); 140 u8 *data_buf, *oob_buf; 141 struct nanddev_bbt_info *bbt_info; 142 struct mtd_oob_ops ops; 143 int bbt_page_num; 144 int ret = 0; 145 struct nand_pos pos; 146 147 BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version); 148 if (!nand->bbt.cache) 149 return -ENOMEM; 150 151 if (block >= nblocks) 152 return -EINVAL; 153 154 /* aligned to page size, and even pages is better */ 155 bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes + 156 mtd->writesize - 1) >> mtd->writesize_shift; 157 bbt_page_num = (bbt_page_num + 1) / 2 * 2; 158 159 data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL); 160 if (!data_buf) 161 return -ENOMEM; 162 oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL); 163 if (!oob_buf) { 164 kfree(data_buf); 165 166 return -ENOMEM; 167 } 168 169 bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes); 170 171 memcpy(data_buf, nand->bbt.cache, nbytes); 172 memcpy(bbt_info, bbt_pattern, 4); 173 bbt_info->version = nand->bbt.version; 174 175 /* Store one entry for each block */ 176 nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos); 177 ret = nand->ops->erase(nand, &pos); 178 if (ret) 179 goto out; 180 181 memset(&ops, 0, sizeof(struct mtd_oob_ops)); 182 ops.mode = MTD_OPS_PLACE_OOB; 183 ops.datbuf = data_buf; 184 ops.len = bbt_page_num * mtd->writesize; 185 ops.oobbuf = oob_buf; 186 ops.ooblen = bbt_page_num * mtd->oobsize; 187 ops.ooboffs = 0; 188 ret = mtd_write_oob(mtd, block * mtd->erasesize, &ops); 189 190 out: 191 kfree(data_buf); 192 kfree(oob_buf); 193 194 return ret; 195 } 196 197 static __maybe_unused int 
nanddev_bbt_format(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_pos pos;
	u32 start_block, block;

	/* Trailing blocks of the device are reserved for on-flash BBT copies. */
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;

	/*
	 * Nand Flash factory defined block0 fixed as good block.
	 * Do not scan block0 to avoid extreme boundary issues caused by
	 * scanning anomalies on that block.
	 */
	nanddev_bbt_set_block_status(nand, 0, NAND_BBT_BLOCK_GOOD);

	/* Record every factory-marked bad block in the in-memory BBT. */
	for (block = 1; block < nblocks; block++) {
		nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
		if (nanddev_isbad(nand, &pos))
			nanddev_bbt_set_block_status(nand, block,
						     NAND_BBT_BLOCK_FACTORY_BAD);
	}

	/*
	 * Mark the good reserved blocks WORN so they are never handed out
	 * as ordinary data blocks.
	 */
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
		if (nanddev_bbt_get_block_status(nand, start_block + block) ==
		    NAND_BBT_BLOCK_GOOD)
			nanddev_bbt_set_block_status(nand, start_block + block,
						     NAND_BBT_BLOCK_WORN);
	}

	return 0;
}

/*
 * Load the newest BBT copy from the reserved trailing blocks into
 * bbt.cache. If no valid copy is found (version stays 0), build a fresh
 * table by scanning factory bad-block markers and persist it (skipped in
 * SPL builds).
 */
static int nanddev_scan_bbt(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	u32 start_block, block;
	int ret = 0;

	nand->bbt.version = 0;
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
	/* read_bbt(update=true) keeps whichever copy has the highest version. */
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
		nanddev_read_bbt(nand, start_block + block, true);

	nand->bbt.option |= NANDDEV_BBT_SCANNED;
#ifndef CONFIG_SPL_BUILD
	if (nand->bbt.version == 0) {
		nanddev_bbt_format(nand);
		ret = nanddev_bbt_update(nand);
		if (ret) {
			/* Persisting failed: drop SCANNED so the next query rescans. */
			nand->bbt.option = 0;
			pr_err("%s fail\n", __func__);
		}
	}
#endif

	return ret;
}
#endif

/**
 * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Initialize the in-memory BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_init(struct nand_device *nand)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	/* One status entry per eraseblock, packed into unsigned longs. */
	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG);

	/* kcalloc zeroes the table: all blocks start as NAND_BBT_BLOCK_GOOD. */
	nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
				  GFP_KERNEL);
	if (!nand->bbt.cache)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_init);

/**
 * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Undoes what has been done in nanddev_bbt_init()
 */
void nanddev_bbt_cleanup(struct nand_device *nand)
{
	kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);

/**
 * nanddev_bbt_update() - Update a BBT
 * @nand: nand device
 *
 * Persist the in-memory BBT to the reserved flash blocks when on-flash BBT
 * support (CONFIG_MTD_NAND_BBT_USING_FLASH) is enabled; otherwise this is a
 * NOP.
 *
 * Return: 0 in case of success, a negative error code otherwise.
301 */ 302 int nanddev_bbt_update(struct nand_device *nand) 303 { 304 #ifdef CONFIG_MTD_NAND_BBT_USING_FLASH 305 if (nand->bbt.cache && 306 nand->bbt.option & NANDDEV_BBT_USE_FLASH) { 307 unsigned int nblocks = nanddev_neraseblocks(nand); 308 u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS]; 309 int start_block, block; 310 u32 min_version, block_des; 311 int ret, count = 0; 312 313 start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS; 314 for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) { 315 ret = nanddev_bbt_get_block_status(nand, start_block + block); 316 if (ret == NAND_BBT_BLOCK_FACTORY_BAD) { 317 bbt_version[block] = 0xFFFFFFFF; 318 continue; 319 } 320 ret = nanddev_read_bbt(nand, start_block + block, 321 false); 322 if (ret < 0) 323 bbt_version[block] = 0xFFFFFFFF; 324 else if (ret == 0) 325 bbt_version[block] = 0; 326 else 327 bbt_version[block] = ret; 328 } 329 get_min_ver: 330 min_version = 0xFFFFFFFF; 331 block_des = 0; 332 for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) { 333 if (bbt_version[block] < min_version) { 334 min_version = bbt_version[block]; 335 block_des = start_block + block; 336 } 337 } 338 339 if (block_des > 0) { 340 nand->bbt.version++; 341 ret = nanddev_write_bbt(nand, block_des); 342 bbt_version[block_des - start_block] = 0xFFFFFFFF; 343 if (ret) { 344 pr_err("%s blk= %d ret= %d\n", __func__, 345 block_des, ret); 346 goto get_min_ver; 347 } else { 348 count++; 349 if (count < 2) 350 goto get_min_ver; 351 BBT_DBG("%s success\n", __func__); 352 } 353 } else { 354 pr_err("%s failed\n", __func__); 355 356 return -1; 357 } 358 } 359 #endif 360 return 0; 361 } 362 EXPORT_SYMBOL_GPL(nanddev_bbt_update); 363 364 /** 365 * nanddev_bbt_get_block_status() - Return the status of an eraseblock 366 * @nand: nand device 367 * @entry: the BBT entry 368 * 369 * Return: a positive number nand_bbt_block_status status or -%ERANGE if @entry 370 * is bigger than the BBT size. 
 */
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	/* Word holding the first bit of this entry in the packed bitmap. */
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	/* Bit offset of the entry inside that word. */
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long status;

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	/*
	 * Lazily load the BBT from flash on first query; the const cast is
	 * required because scanning mutates bbt.cache/option/version.
	 */
	if (nand->bbt.option & NANDDEV_BBT_USE_FLASH &&
	    !(nand->bbt.option & NANDDEV_BBT_SCANNED))
		nanddev_scan_bbt((struct nand_device *)nand);
#endif

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

	status = pos[0] >> offs;
	/* An entry may straddle two words; fetch the spill-over bits. */
	if (bits_per_block + offs > BITS_PER_LONG)
		status |= pos[1] << (BITS_PER_LONG - offs);

	return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);

/**
 * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
 *				    in-memory BBT
 * @nand: nand device
 * @entry: the BBT entry to update
 * @status: the new status
 *
 * Update an entry of the in-memory BBT. If you want to push the updated BBT
 * to the NAND you should call nanddev_bbt_update().
 *
 * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
 * size.
410 */ 411 int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, 412 enum nand_bbt_block_status status) 413 { 414 unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS); 415 unsigned long *pos = nand->bbt.cache + 416 ((entry * bits_per_block) / BITS_PER_LONG); 417 unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG; 418 unsigned long val = status & GENMASK(bits_per_block - 1, 0); 419 420 if (entry >= nanddev_neraseblocks(nand)) 421 return -ERANGE; 422 423 if (offs + bits_per_block - 1 > (BITS_PER_LONG - 1)) 424 pos[0] &= ~GENMASK(BITS_PER_LONG - 1, offs); 425 else 426 pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs); 427 pos[0] |= val << offs; 428 429 if (bits_per_block + offs > BITS_PER_LONG) { 430 unsigned int rbits = BITS_PER_LONG - offs; 431 432 pos[1] &= ~GENMASK(bits_per_block - rbits - 1, 0); 433 pos[1] |= val >> rbits; 434 } 435 436 return 0; 437 } 438 EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status); 439