// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand-bbt: " fmt

#include <linux/mtd/nand.h>
#ifndef __UBOOT__
#include <linux/slab.h>
#endif

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH

#ifdef BBT_DEBUG
#define bbt_dbg pr_err
#else
#define bbt_dbg(args...)
#endif

#define BBT_VERSION_INVALID		(0xFFFFFFFFU)
#define BBT_VERSION_BLOCK_ABNORMAL	(BBT_VERSION_INVALID - 1)
#define BBT_VERSION_MAX			(BBT_VERSION_INVALID - 8)

struct nanddev_bbt_info {
	u8 pattern[4];
	unsigned int version;
	u32 hash;
};

static u8 bbt_pattern[] = {'B', 'b', 't', '0'};

#if defined(BBT_DEBUG) && defined(BBT_DEBUG_DUMP)
static void bbt_dbg_hex(char *s, void *buf, u32 len)
{
	u32 i, j = 0;
	u32 *p32 = (u32 *)buf;

	for (i = 0; i < len / 4; i++) {
		if (j == 0)
			printf("%s %p + 0x%04x: ", s, buf, i * 4);

		printf("0x%08x,", p32[i]);

		if (++j >= 4) {
			j = 0;
			printf("\n");
		}
	}
	printf("\n");
}
#endif

static u32 js_hash(u8 *buf, u32 len)
{
	u32 hash = 0x47C6A7E6;
	u32 i;

	for (i = 0; i < len; i++)
		hash ^= ((hash << 5) + buf[i] + (hash >> 2));

	return hash;
}

static bool bbt_check_hash(u8 *buf, u32 len, u32 hash_cmp)
{
	u32 hash;

	/* Stay compatible with BBTs written before the hash was added */
	if (hash_cmp == 0 || hash_cmp == 0xFFFFFFFF)
		return true;

	hash = js_hash(buf, len);
	if (hash != hash_cmp)
		return false;

	return true;
}

static u32 bbt_nand_isbad_bypass(struct nand_device *nand, u32 block)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_pos pos;

	nanddev_bbt_set_block_status(nand, block, NAND_BBT_BLOCK_STATUS_UNKNOWN);
	nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);

	return nanddev_isbad(nand, &pos);
}
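
/*
 * On-flash BBT block layout, as read and written by nanddev_read_bbt() and
 * nanddev_write_bbt() below (informal summary, not a formal spec):
 *
 *	+--------------------------------+ offset 0 of a reserved eraseblock
 *	| bbt.cache bitmap (nbytes)      | bits_per_block status bits per block
 *	+--------------------------------+
 *	| struct nanddev_bbt_info        |
 *	|   pattern[4] = "Bbt0"          |
 *	|   version                      |
 *	|   hash: js_hash() of all bytes |
 *	|         preceding this field   |
 *	+--------------------------------+
 *
 * The total size is rounded up to an even number of pages and stored in the
 * first pages of the block.
 */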

/**
 * nanddev_read_bbt() - Read one on-flash copy of the BBT (Bad Block Table)
 * @nand: NAND device
 * @block: number of the eraseblock holding the BBT copy
 * @update: true - overwrite bbt.cache and bbt.version if the copy is newer;
 *	    false - only return the version of the copy
 *
 * Read the BBT stored in @block and optionally refresh the in-memory BBT.
 *
 * Return: the version of the BBT copy found in @block, 0 if the block holds
 * no valid BBT, BBT_VERSION_BLOCK_ABNORMAL if the block could not be read or
 * failed the hash check, or -EIO on error.
 */
static int nanddev_read_bbt(struct nand_device *nand, u32 block, bool update)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	u32 bbt_page_num;
	int ret = 0;
	unsigned int version = 0;

	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Align to the page size; an even number of pages is preferred */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
			mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;
	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;

	/* Read the BBT pages and their OOB areas in one go */
	ret = mtd_read_oob(mtd, block * mtd->erasesize, &ops);
	if (ret && ret != -EUCLEAN) {
		pr_err("read_bbt blk=%d fail=%d update=%d\n", block, ret, update);
		ret = 0;
		version = BBT_VERSION_BLOCK_ABNORMAL;
		goto out;
	} else {
		ret = 0;
	}

	/* Bad block, or good block without a BBT */
	if (memcmp(bbt_pattern, bbt_info->pattern, 4)) {
		ret = 0;
		goto out;
	}

	/* Good block with an abnormal BBT */
	if (oob_buf[0] == 0xff ||
	    !bbt_check_hash(data_buf, nbytes + sizeof(struct nanddev_bbt_info) - 4, bbt_info->hash)) {
		pr_err("read_bbt check fail blk=%d ret=%d update=%d\n", block, ret, update);
		ret = 0;
		version = BBT_VERSION_BLOCK_ABNORMAL;
		goto out;
	}

	/* Good block with a good BBT */
	version = bbt_info->version;
	bbt_dbg("read_bbt from blk=%d ver=%d update=%d\n", block, version, update);
	if (update && version > nand->bbt.version) {
		memcpy(nand->bbt.cache, data_buf, nbytes);
		nand->bbt.version = version;
	}

#if defined(BBT_DEBUG) && defined(BBT_DEBUG_DUMP)
	bbt_dbg_hex("bbt", data_buf, nbytes + sizeof(struct nanddev_bbt_info));
	if (version) {
		u8 *temp_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
		bool in_scan = nand->bbt.option & NANDDEV_BBT_SCANNED;

		if (!temp_buf)
			goto out;

		memcpy(temp_buf, nand->bbt.cache, nbytes);
		memcpy(nand->bbt.cache, data_buf, nbytes);

		if (!in_scan)
			nand->bbt.option |= NANDDEV_BBT_SCANNED;
		for (block = 0; block < nblocks; block++) {
			ret = nanddev_bbt_get_block_status(nand, block);
			if (ret != NAND_BBT_BLOCK_GOOD)
				bbt_dbg("bad block[0x%x], ret=%d\n", block, ret);
		}
		if (!in_scan)
			nand->bbt.option &= ~NANDDEV_BBT_SCANNED;
		memcpy(nand->bbt.cache, temp_buf, nbytes);
		kfree(temp_buf);
		ret = 0;
	}
#endif

out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret < 0 ? -EIO : (int)version;
}
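
/*
 * Write the in-memory BBT and its nanddev_bbt_info trailer (pattern, version,
 * hash) to @block: erase the block, program the data, then read it back
 * through nanddev_read_bbt() to verify that the stored version matches.
 */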

static int nanddev_write_bbt(struct nand_device *nand, u32 block)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG) * sizeof(*nand->bbt.cache);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	u8 *data_buf, *oob_buf;
	struct nanddev_bbt_info *bbt_info;
	struct mtd_oob_ops ops;
	u32 bbt_page_num;
	int ret = 0, version;
	struct nand_pos pos;

	bbt_dbg("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
	if (!nand->bbt.cache)
		return -ENOMEM;

	if (block >= nblocks)
		return -EINVAL;

	/* Align to the page size; an even number of pages is preferred */
	bbt_page_num = (sizeof(struct nanddev_bbt_info) + nbytes +
			mtd->writesize - 1) >> mtd->writesize_shift;
	bbt_page_num = (bbt_page_num + 1) / 2 * 2;

	data_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;
	oob_buf = kzalloc(bbt_page_num * mtd->oobsize, GFP_KERNEL);
	if (!oob_buf) {
		kfree(data_buf);

		return -ENOMEM;
	}

	bbt_info = (struct nanddev_bbt_info *)(data_buf + nbytes);

	memcpy(data_buf, nand->bbt.cache, nbytes);
	memcpy(bbt_info, bbt_pattern, 4);
	bbt_info->version = nand->bbt.version;
	bbt_info->hash = js_hash(data_buf, nbytes + sizeof(struct nanddev_bbt_info) - 4);

	/* Erase the BBT block before rewriting it */
	nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
	ret = nand->ops->erase(nand, &pos);
	if (ret)
		goto out;

	memset(&ops, 0, sizeof(struct mtd_oob_ops));
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = data_buf;
	ops.len = bbt_page_num * mtd->writesize;
	ops.oobbuf = oob_buf;
	ops.ooblen = bbt_page_num * mtd->oobsize;
	ops.ooboffs = 0;
	ret = mtd_write_oob(mtd, block * mtd->erasesize, &ops);
	if (ret) {
		nand->ops->erase(nand, &pos);
		goto out;
	}

	version = nanddev_read_bbt(nand, block, false);
	if (version != bbt_info->version) {
		pr_err("bbt_write fail, blk=%d recheck fail %d-%d\n",
		       block, version, bbt_info->version);
		nand->ops->erase(nand, &pos);
		ret = -EIO;
	} else {
		ret = 0;
	}
out:
	kfree(data_buf);
	kfree(oob_buf);

	return ret;
}

static __maybe_unused int nanddev_bbt_format(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_pos pos;
	u32 start_block, block;
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG);

	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;

	for (block = 0; block < nblocks; block++) {
		nanddev_offs_to_pos(nand, block * mtd->erasesize, &pos);
		if (nanddev_isbad(nand, &pos)) {
			if (bbt_nand_isbad_bypass(nand, 0)) {
				memset(nand->bbt.cache, 0, nwords * sizeof(*nand->bbt.cache));
				pr_err("bbt_format fail, test good block %d fail\n", 0);
				return -EIO;
			}

			if (!bbt_nand_isbad_bypass(nand, block)) {
				memset(nand->bbt.cache, 0, nwords * sizeof(*nand->bbt.cache));
				pr_err("bbt_format fail, test bad block %d fail\n", block);
				return -EIO;
			}

			nanddev_bbt_set_block_status(nand, block,
						     NAND_BBT_BLOCK_FACTORY_BAD);
		}
	}

	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
		if (nanddev_bbt_get_block_status(nand, start_block + block) ==
		    NAND_BBT_BLOCK_GOOD)
			nanddev_bbt_set_block_status(nand, start_block + block,
						     NAND_BBT_BLOCK_WORN);
	}

	return 0;
}
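
/*
 * Placement of the on-flash copies: the last NANDDEV_BBT_SCAN_MAXBLOCKS
 * eraseblocks of the device are reserved for the BBT. nanddev_bbt_format()
 * marks the good ones among them as worn so they stay out of normal use,
 * nanddev_scan_bbt() loads the copy with the highest version from them, and
 * nanddev_bbt_update() rewrites the two copies holding the oldest versions.
 */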

static int nanddev_scan_bbt(struct nand_device *nand)
{
	unsigned int nblocks = nanddev_neraseblocks(nand);
	u32 start_block, block;
	int ret = 0;

	nand->bbt.version = 0;
	start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
	for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++)
		nanddev_read_bbt(nand, start_block + block, true);

	nand->bbt.option |= NANDDEV_BBT_SCANNED;
#ifndef CONFIG_SPL_BUILD
	if (nand->bbt.version == 0) {
		ret = nanddev_bbt_format(nand);
		if (ret) {
			nand->bbt.option = 0;
			pr_err("%s format fail\n", __func__);

			return ret;
		}

		ret = nanddev_bbt_update(nand);
		if (ret) {
			nand->bbt.option = 0;
			pr_err("%s update fail\n", __func__);

			return ret;
		}
	}
#endif

#if defined(BBT_DEBUG)
	pr_err("scan_bbt success\n");
	if (nand->bbt.version) {
		for (block = 0; block < nblocks; block++) {
			ret = nanddev_bbt_get_block_status(nand, block);
			if (ret != NAND_BBT_BLOCK_GOOD)
				bbt_dbg("bad block[0x%x], ret=%d\n", block, ret);
		}
	}
#endif

	return ret;
}
#endif

/**
 * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Initialize the in-memory BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_init(struct nand_device *nand)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned int nblocks = nanddev_neraseblocks(nand);
	unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
					   BITS_PER_LONG);

	nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
				  GFP_KERNEL);
	if (!nand->bbt.cache)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_init);

/**
 * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Undoes what has been done in nanddev_bbt_init()
 */
void nanddev_bbt_cleanup(struct nand_device *nand)
{
	kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
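
/*
 * Lifecycle sketch for the two helpers above (illustrative only; the NAND
 * core, not this file, decides when they are called):
 *
 *	ret = nanddev_bbt_init(nand);	allocates nand->bbt.cache
 *	if (ret)
 *		return ret;
 *
 *	status = nanddev_bbt_get_block_status(nand, block);
 *	...
 *
 *	nanddev_bbt_cleanup(nand);	frees nand->bbt.cache on teardown
 */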

/**
 * nanddev_bbt_update() - Update a BBT
 * @nand: nand device
 *
 * Write the in-memory BBT to the flash blocks reserved for it when the
 * on-flash BBT is enabled (NANDDEV_BBT_USE_FLASH with
 * CONFIG_MTD_NAND_BBT_USING_FLASH); otherwise this is a NOP.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_update(struct nand_device *nand)
{
#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	struct nand_pos pos;
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	if (nand->bbt.cache &&
	    nand->bbt.option & NANDDEV_BBT_USE_FLASH) {
		unsigned int nblocks = nanddev_neraseblocks(nand);
		u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
		int start_block, block;
		u32 min_version, block_des;
		int ret, count = 0, status;

		start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			status = nanddev_bbt_get_block_status(nand, start_block + block);
			ret = nanddev_read_bbt(nand, start_block + block, false);
			if ((ret == 0 && status == NAND_BBT_BLOCK_FACTORY_BAD) ||
			    ret == -EIO)
				bbt_version[block] = BBT_VERSION_INVALID;
			else
				bbt_version[block] = ret;
		}
get_min_ver:
		min_version = BBT_VERSION_MAX;
		block_des = 0;
		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			if (bbt_version[block] < min_version) {
				min_version = bbt_version[block];
				block_des = start_block + block;
			}
		}

		/* Overwrite the BBT_VERSION_BLOCK_ABNORMAL block */
		if (nand->bbt.version < min_version)
			nand->bbt.version = min_version + 4;

		if (block_des > 0) {
			nand->bbt.version++;
			ret = nanddev_write_bbt(nand, block_des);
			if (ret) {
				pr_err("bbt_update fail, blk=%d ret=%d\n", block_des, ret);

				return -1;
			}

			bbt_version[block_des - start_block] = BBT_VERSION_INVALID;
			count++;
			if (count < 2)
				goto get_min_ver;
			bbt_dbg("bbt_update success\n");
		} else {
			pr_err("bbt_update failed\n");
			ret = -1;
		}

		for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
			if (bbt_version[block] == BBT_VERSION_BLOCK_ABNORMAL) {
				block_des = start_block + block;
				nanddev_offs_to_pos(nand, block_des * mtd->erasesize, &pos);
				nand->ops->erase(nand, &pos);
			}
		}

		return ret;
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_update);
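
/*
 * The two accessors below pack one nand_bbt_block_status value per
 * eraseblock into the nand->bbt.cache array of unsigned longs, using
 * bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS) bits per entry.
 *
 * Worked example (illustrative; assumes the upstream status enum where
 * NAND_BBT_BLOCK_NUM_STATUS is 5, hence bits_per_block is 3, and a 64-bit
 * BITS_PER_LONG):
 *
 *	entry = 21  ->  bit offset = 21 * 3 = 63
 *	            ->  word = 63 / 64 = 0, offs = 63
 *
 * offs + bits_per_block = 66 exceeds BITS_PER_LONG, so the entry straddles
 * two words: bit 0 of the status comes from cache[0] >> 63, bits 1-2 come
 * from cache[1] << 1, and the result is masked with GENMASK(2, 0).
 */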

/**
 * nanddev_bbt_get_block_status() - Return the status of an eraseblock
 * @nand: nand device
 * @entry: the BBT entry
 *
 * Return: the nand_bbt_block_status of the eraseblock (a positive number), or
 * -%ERANGE if @entry is bigger than the BBT size.
 */
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long status;

#ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
	if (nand->bbt.option & NANDDEV_BBT_USE_FLASH &&
	    !(nand->bbt.option & NANDDEV_BBT_SCANNED))
		nanddev_scan_bbt((struct nand_device *)nand);
#endif

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

	status = pos[0] >> offs;
	if (bits_per_block + offs > BITS_PER_LONG)
		status |= pos[1] << (BITS_PER_LONG - offs);

	return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);

/**
 * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
 *				    in-memory BBT
 * @nand: nand device
 * @entry: the BBT entry to update
 * @status: the new status
 *
 * Update an entry of the in-memory BBT. If you want to push the updated BBT
 * to the NAND you should call nanddev_bbt_update().
 *
 * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
 * size.
 */
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status)
{
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long val = status & GENMASK(bits_per_block - 1, 0);

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

	if (offs + bits_per_block - 1 > (BITS_PER_LONG - 1))
		pos[0] &= ~GENMASK(BITS_PER_LONG - 1, offs);
	else
		pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
	pos[0] |= val << offs;

	if (bits_per_block + offs > BITS_PER_LONG) {
		unsigned int rbits = BITS_PER_LONG - offs;

		pos[1] &= ~GENMASK(bits_per_block - rbits - 1, 0);
		pos[1] |= val >> rbits;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
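
/*
 * Usage sketch (illustrative only): marking an eraseblock bad in the cache
 * and, when the on-flash BBT is enabled, pushing the change to the device:
 *
 *	nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
 *	nanddev_bbt_update(nand);
 */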