/*
 * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <linux/bug.h>
#include <linux/delay.h>

#include "flash_com.h"
#include "rkflash_debug.h"
#include "rk_sftl.h"
#include "sfc.h"
#include "sfc_nand.h"

/*
 * Table of supported SPI NAND parts, matched on the first two ID bytes
 * (see sfc_nand_get_info()).  Field order follows struct nand_info
 * (sfc_nand.h); fields used in this file are: id, sec_per_page,
 * page_per_blk, plane_per_die, blk_per_plane, the opcode set
 * (page_read_cmd 0x13, page_prog_cmd 0x10, cache read/prog x1 and x4,
 * block_erase_cmd 0xD8), feature flags, density, max_ecc_bits, the QE
 * feature register address/bit (0xFF/0xFF = no QE bit needed), the
 * four meta/spare byte offsets, and an optional per-part ECC status
 * decoder (NULL = use the default sfc_nand_ecc_status()).
 */
static struct nand_info spi_nand_tbl[] = {
	/* TC58CVG0S0HxAIx */
	{0x98C2, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x02, 0xD8, 0x00, 18, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* TC58CVG1S0HxAIx */
	{0x98CB, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x02, 0xD8, 0x00, 19, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* MX35LF1GE4AB */
	{0xC212, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 4, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* MX35LF2GE4AB */
	{0xC222, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 4, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* GD5F1GQ4UAYIG */
	{0xC8F1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, NULL},
	/* MT29F1G01ZAC */
	{0x2C12, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x00, 18, 1, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* GD5F2GQ40BY2GR */
	{0xC8D2, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp3},
	/* GD5F1GQ4RB9IGR */
	{0xC8D1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp3},
	/* IS37SML01G1 */
	{0xC821, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x00, 18, 1, 0xFF, 0xFF, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* W25N01GV */
	{0xEFAA, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xFF, 0xFF, {4, 20, 36, 0xff}, &sfc_nand_ecc_status_sp1},
	/* HYF2GQ4UAACAE */
	{0xC952, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 14, 0xB0, 0, {4, 36, 0xff, 0xff}, NULL},
	/* HYF2GQ4UDACAE */
	{0xC922, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, NULL},
	/* HYF2GQ4UHCCAE */
	{0xC95A, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 14, 0xB0, 0, {4, 36, 0xff, 0xff}, NULL},
	/* HYF1GQ4UDACAE */
	{0xC921, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, NULL},
	/* F50L1G41LB */
	{0xC801, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xFF, 0xFF, {20, 36, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* XT26G02A */
	{0x0BE2, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* XT26G01A */
	{0x0BE1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* FS35ND01G-S1 */
	{0xCDB1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 4, 0xB0, 0x0, {16, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp5},
	/* FS35ND02G-S2 */
	{0xCDA2, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x03, 0x02, 0xD8, 0x00, 19, 4, 0xFF, 0xFF, {16, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp5},
	/* DS35Q1GA-1B */
	{0xE571, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 4, 0xB0, 0x0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* DS35Q2GA-1B */
	{0xE572, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 4, 0xB0, 0x0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* EM73C044SNC-G */
	{0xD522, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0x0, {4, 20, 0xff, 0xff}, NULL},
	/* EM73D044SNB-G */
	{0xD520, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 8, 0xB0, 0x0, {4, 20, 0xff, 0xff}, NULL},
	/* ATO25D1GA */
	{0x9B12, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x40, 18, 1, 0xB0, 0x0, {20, 36, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* XT26G02B */
	{0x0BF2, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp5},
	/* XT26G01B */
	{0x0BF1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* HYF4GQ4UAACBE */
	{0xC9D4, 8, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 20, 4, 0xB0, 0, {32, 64, 36, 68}, NULL},
	/* FM25S01 */
	{0xA1A1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xB0, 0, {0, 4, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* HYF1GQ4UPACAE */
	{0xC9A1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* EM73E044SNA-G */
	{0xD503, 8, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 20, 8, 0xB0, 0, {4, 40, 8, 44}, NULL},
	/* GD5F2GQ5UEYIG */
	{0xC852, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp2},
	/* GD5F1GQ4R */
	{0xC8C1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp3},
	/* TC58CVG2S0HRAIJ */
	{0x98ED, 8, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 20, 8, 0xFF, 0xFF, {4, 12, 8, 16}, NULL},
	/* TC58CVG1S3HRAIJ */
	{0x98EB, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* TC58CVG0S3HRAIJ */
	{0x98E2, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* XT26G04A */
	{0x0BE3, 4, 128, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 20, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* FS35ND01G-S1Y2 */
	{0xCDEA, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 4, 0xFF, 0xFF, {4, 8, 12, 16}, &sfc_nand_ecc_status_sp1},
	/* FS35ND02G-S3Y2 */
	{0xCDEB, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 4, 0xFF, 0xFF, {4, 8, 12, 16}, &sfc_nand_ecc_status_sp1},
};

/* Parameters of the chip found at probe time; set by sfc_nand_init(). */
static struct nand_info *p_nand_info;
/* Word-aligned bounce buffer holding one full page (data + spare). */
static u32 gp_page_buf[SFC_NAND_PAGE_MAX_SIZE / 4];
/* Runtime device state (ID bytes, chosen opcodes, line widths). */
static struct SFNAND_DEV sfc_nand_dev;

/*
 * Look up the parameter entry for a chip, keyed on the first two
 * JEDEC ID bytes (manufacturer + device).  Returns NULL if the part
 * is not in spi_nand_tbl[].
 */
static struct nand_info *sfc_nand_get_info(u8 *nand_id)
{
	u32 i;
	u32 id = (nand_id[0] << 8) | (nand_id[1] << 0);

	for (i = 0; i < ARRAY_SIZE(spi_nand_tbl); i++) {
		if (spi_nand_tbl[i].id == id)
			return &spi_nand_tbl[i];
	}
	return NULL;
}

/* Issue Write Enable; required before program/erase/set-feature ops. */
static int sfc_nand_write_en(void)
{
	int ret;
	struct rk_sfc_op op;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = CMD_WRITE_EN;

	op.sfctrl.d32 = 0;

	ret = sfc_request(&op, 0, NULL, 0);
	return ret;
}

/*
 * Write one 0xFF byte with opcode 0 on two data lines.
 * NOTE(review): appears to be a controller quirk/preset needed before
 * x4 transfers on SFC versions < 3 (see callers) — confirm against
 * the SFC TRM.
 */
static int sfc_nand_rw_preset(void)
{
	int ret;
	struct rk_sfc_op op;
	u8 status = 0xFF;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = 0;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.datalines = 2;

	ret = sfc_request(&op, 0, &status, 1);
	return ret;
}

/*
 * Get Feature (opcode 0x0F): read one feature register byte at @addr
 * into *@data.  *data is zeroed first so it is defined on failure.
 */
static int sfc_nand_read_feature(u8 addr, u8 *data)
{
	int ret;
	struct rk_sfc_op op;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = 0x0F;
	op.sfcmd.b.addrbits = SFC_ADDR_XBITS;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.addrbits = 8;

	*data = 0;

	ret = sfc_request(&op, addr, data, 1);
	if (ret != SFC_OK)
		return ret;
	return SFC_OK;
}

/*
 * Set Feature (opcode 0x1F): write @status to feature register @addr.
 * Issues Write Enable first, as the operation modifies chip state.
 */
static int sfc_nand_write_feature(u32 addr, u8 status)
{
	int ret;
	struct rk_sfc_op op;

	sfc_nand_write_en();

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = 0x1F;
	op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.addrbits = 8;

	ret = sfc_request(&op, addr, &status, 1);
	if (ret != SFC_OK)
		return ret;
	return ret;
}

/*
 * Poll the status register (0xC0) until the OIP bit (bit 0) clears or
 * @timeout iterations elapse (1us delay each).  On return, *data holds
 * the last status byte read (0 if the first read failed).  Returns
 * SFC_OK when idle, a negative sfc error on transport failure, or -1
 * on timeout.
 */
static int sfc_nand_wait_busy(u8 *data, int timeout)
{
	int ret;
	int i;
	u8 status;

	*data = 0;
	for (i = 0; i < timeout; i++) {
		ret = sfc_nand_read_feature(0xC0, &status);
		if (ret != SFC_OK)
			return ret;
		*data = status;
		if (!(status & (1 << 0)))
			return SFC_OK;
		sfc_delay(1);
	}
	return -1;
}

/*
 * ecc default:
 * ecc bits: 0xC0[4,5]
 * 0x00, No bit errors were detected
 * 0x01, Bit errors were detected and corrected.
 * 0x10, Multiple bit errors were detected and not corrected.
 * 0x11, Bits errors were detected and corrected, bit error count
 *	exceed the bit flip detection threshold
 */
static u32 sfc_nand_ecc_status(void)
{
	u32 ret;
	u32 i;
	u8 ecc;
	u8 status;
	u32 timeout = 1000 * 1000;

	/* Wait for OIP to clear so the ECC field is valid. */
	for (i = 0; i < timeout; i++) {
		ret = sfc_nand_read_feature(0xC0, &status);
		if (ret != SFC_OK)
			return SFC_NAND_ECC_ERROR;
		if (!(status & (1 << 0)))
			break;
		sfc_delay(1);
	}

	ecc = (status >> 4) & 0x03;

	/* Map the 2-bit code to the driver's tri-state ECC result. */
	if (ecc <= 1)
		ret = SFC_NAND_ECC_OK;
	else if (ecc == 2)
		ret = (u32)SFC_NAND_ECC_ERROR;
	else
		ret = SFC_NAND_ECC_REFRESH;

	return ret;
}

/*
 * ecc special type1:
 * ecc bits: 0xC0[4,5]
 * 0x00, No bit errors were detected;
 * 0x01, Bits errors were detected and corrected, bit error count
 *	may reach the bit flip detection threshold;
 * 0x10, Multiple bit errors were detected and not corrected;
 * 0x11, Reserved.
249 */ 250 u32 sfc_nand_ecc_status_sp1(void) 251 { 252 u32 ret; 253 u32 i; 254 u8 ecc; 255 u8 status; 256 u32 timeout = 1000 * 1000; 257 258 for (i = 0; i < timeout; i++) { 259 ret = sfc_nand_read_feature(0xC0, &status); 260 if (ret != SFC_OK) 261 return SFC_NAND_ECC_ERROR; 262 if (!(status & (1 << 0))) 263 break; 264 sfc_delay(1); 265 } 266 267 ecc = (status >> 4) & 0x03; 268 269 if (ecc == 0) 270 ret = SFC_NAND_ECC_OK; 271 else if (ecc == 1) 272 ret = SFC_NAND_ECC_REFRESH; 273 else 274 ret = (u32)SFC_NAND_ECC_ERROR; 275 276 return ret; 277 } 278 279 /* 280 * ecc spectial type2: 281 * ecc bits: 0xC0[4,5] 0xF0[4,5] 282 * [0x0000, 0x0011], No bit errors were detected; 283 * [0x0100, 0x0111], Bit errors were detected and corrected. Not 284 * reach Flipping Bits; 285 * [0x1000, 0x1011], Multiple bit errors were detected and 286 * not corrected. 287 * [0x1100, 0x1111], reserved. 288 */ 289 u32 sfc_nand_ecc_status_sp2(void) 290 { 291 u32 ret; 292 u32 i; 293 u8 ecc; 294 u8 status, status1; 295 u32 timeout = 1000 * 1000; 296 297 for (i = 0; i < timeout; i++) { 298 ret = sfc_nand_read_feature(0xC0, &status); 299 if (ret != SFC_OK) 300 return SFC_NAND_ECC_ERROR; 301 ret = sfc_nand_read_feature(0xF0, &status1); 302 if (ret != SFC_OK) 303 return SFC_NAND_ECC_ERROR; 304 if (!(status & (1 << 0))) 305 break; 306 sfc_delay(1); 307 } 308 309 ecc = (status >> 4) & 0x03; 310 ecc = (ecc << 2) | ((status1 >> 4) & 0x03); 311 if (ecc < 7) 312 ret = SFC_NAND_ECC_OK; 313 else if (ecc == 7) 314 ret = SFC_NAND_ECC_REFRESH; 315 else 316 ret = (u32)SFC_NAND_ECC_ERROR; 317 318 return ret; 319 } 320 321 /* 322 * ecc spectial type3: 323 * ecc bits: 0xC0[4,5] 0xF0[4,5] 324 * [0x0000, 0x0011], No bit errors were detected; 325 * [0x0100, 0x0111], Bit errors were detected and corrected. Not 326 * reach Flipping Bits; 327 * [0x1000, 0x1011], Multiple bit errors were detected and 328 * not corrected. 
329 * [0x1100, 0x1111], Bit error count equals the bit flip 330 * detectio nthreshold 331 */ 332 u32 sfc_nand_ecc_status_sp3(void) 333 { 334 u32 ret; 335 u32 i; 336 u8 ecc; 337 u8 status, status1; 338 u32 timeout = 1000 * 1000; 339 340 for (i = 0; i < timeout; i++) { 341 ret = sfc_nand_read_feature(0xC0, &status); 342 if (ret != SFC_OK) 343 return SFC_NAND_ECC_ERROR; 344 ret = sfc_nand_read_feature(0xF0, &status1); 345 if (ret != SFC_OK) 346 return SFC_NAND_ECC_ERROR; 347 if (!(status & (1 << 0))) 348 break; 349 sfc_delay(1); 350 } 351 352 ecc = (status >> 4) & 0x03; 353 ecc = (ecc << 2) | ((status1 >> 4) & 0x03); 354 if (ecc < 7) 355 ret = SFC_NAND_ECC_OK; 356 else if (ecc == 7 || ecc >= 12) 357 ret = SFC_NAND_ECC_REFRESH; 358 else 359 ret = (u32)SFC_NAND_ECC_ERROR; 360 361 return ret; 362 } 363 364 /* 365 * ecc spectial type4: 366 * ecc bits: 0xC0[2,5] 367 * [0x0000], No bit errors were detected; 368 * [0x0001, 0x0111], Bit errors were detected and corrected. Not 369 * reach Flipping Bits; 370 * [0x1000], Multiple bit errors were detected and 371 * not corrected. 372 * [0x1100], Bit error count equals the bit flip 373 * detectionthreshold 374 * else, reserved 375 */ 376 u32 sfc_nand_ecc_status_sp4(void) 377 { 378 u32 ret; 379 u32 i; 380 u8 ecc; 381 u8 status; 382 u32 timeout = 1000 * 1000; 383 384 for (i = 0; i < timeout; i++) { 385 ret = sfc_nand_read_feature(0xC0, &status); 386 if (ret != SFC_OK) 387 return SFC_NAND_ECC_ERROR; 388 if (!(status & (1 << 0))) 389 break; 390 sfc_delay(1); 391 } 392 393 ecc = (status >> 2) & 0x0f; 394 if (ecc < 7) 395 ret = SFC_NAND_ECC_OK; 396 else if (ecc == 7 || ecc == 12) 397 ret = SFC_NAND_ECC_REFRESH; 398 else 399 ret = (u32)SFC_NAND_ECC_ERROR; 400 401 return ret; 402 } 403 404 /* 405 * ecc spectial type5: 406 * ecc bits: 0xC0[4,6] 407 * [0x0], No bit errors were detected; 408 * [0x001, 0x011], Bit errors were detected and corrected. 
Not 409 * reach Flipping Bits; 410 * [0x100], Bit error count equals the bit flip 411 * detectionthreshold 412 * [0x101, 0x110], Reserved; 413 * [0x111], Multiple bit errors were detected and 414 * not corrected. 415 */ 416 u32 sfc_nand_ecc_status_sp5(void) 417 { 418 u32 ret; 419 u32 i; 420 u8 ecc; 421 u8 status; 422 u32 timeout = 1000 * 1000; 423 424 for (i = 0; i < timeout; i++) { 425 ret = sfc_nand_read_feature(0xC0, &status); 426 if (ret != SFC_OK) 427 return SFC_NAND_ECC_ERROR; 428 if (!(status & (1 << 0))) 429 break; 430 sfc_delay(1); 431 } 432 433 ecc = (status >> 4) & 0x07; 434 if (ecc < 4) 435 ret = SFC_NAND_ECC_OK; 436 else if (ecc == 4) 437 ret = SFC_NAND_ECC_REFRESH; 438 else 439 ret = (u32)SFC_NAND_ECC_ERROR; 440 441 return ret; 442 } 443 444 u32 sfc_nand_erase_block(u8 cs, u32 addr) 445 { 446 int ret; 447 struct rk_sfc_op op; 448 u8 status; 449 450 rkflash_print_dio("%s %x\n", __func__, addr); 451 op.sfcmd.d32 = 0; 452 op.sfcmd.b.cmd = p_nand_info->block_erase_cmd; 453 op.sfcmd.b.addrbits = SFC_ADDR_24BITS; 454 op.sfcmd.b.rw = SFC_WRITE; 455 456 op.sfctrl.d32 = 0; 457 458 sfc_nand_write_en(); 459 ret = sfc_request(&op, addr, NULL, 0); 460 if (ret != SFC_OK) 461 return ret; 462 ret = sfc_nand_wait_busy(&status, 1000 * 1000); 463 if (status & (1 << 2)) 464 return SFC_NAND_PROG_ERASE_ERROR; 465 466 return ret; 467 } 468 469 static u32 sfc_nand_prog_page_raw(u8 cs, u32 addr, u32 *p_page_buf) 470 { 471 int ret; 472 u32 plane; 473 struct rk_sfc_op op; 474 u8 status; 475 u32 page_size = SFC_NAND_SECTOR_FULL_SIZE * p_nand_info->sec_per_page; 476 477 rkflash_print_dio("%s %x %x\n", __func__, addr, p_page_buf[0]); 478 sfc_nand_write_en(); 479 if (sfc_nand_dev.prog_lines == DATA_LINES_X4 && 480 p_nand_info->feature & FEA_SOFT_QOP_BIT && 481 sfc_get_version() < SFC_VER_3) 482 sfc_nand_rw_preset(); 483 484 op.sfcmd.d32 = 0; 485 op.sfcmd.b.cmd = sfc_nand_dev.page_prog_cmd; 486 op.sfcmd.b.addrbits = SFC_ADDR_XBITS; 487 op.sfcmd.b.rw = SFC_WRITE; 488 489 
op.sfctrl.d32 = 0; 490 op.sfctrl.b.datalines = sfc_nand_dev.prog_lines; 491 op.sfctrl.b.addrbits = 16; 492 plane = p_nand_info->plane_per_die == 2 ? ((addr >> 6) & 0x1) << 12 : 0; 493 sfc_request(&op, plane, p_page_buf, page_size); 494 495 op.sfcmd.d32 = 0; 496 op.sfcmd.b.cmd = p_nand_info->page_prog_cmd; 497 op.sfcmd.b.addrbits = SFC_ADDR_24BITS; 498 op.sfcmd.b.rw = SFC_WRITE; 499 500 op.sfctrl.d32 = 0; 501 ret = sfc_request(&op, addr, p_page_buf, 0); 502 if (ret != SFC_OK) 503 return ret; 504 ret = sfc_nand_wait_busy(&status, 1000 * 1000); 505 if (status & (1 << 3)) 506 return SFC_NAND_PROG_ERASE_ERROR; 507 508 return ret; 509 } 510 511 u32 sfc_nand_prog_page(u8 cs, u32 addr, u32 *p_data, u32 *p_spare) 512 { 513 int ret; 514 u32 sec_per_page = p_nand_info->sec_per_page; 515 u32 data_size = sec_per_page * SFC_NAND_SECTOR_SIZE; 516 struct nand_mega_area *meta = &p_nand_info->meta; 517 518 memcpy(gp_page_buf, p_data, data_size); 519 memset(&gp_page_buf[data_size / 4], 0xff, sec_per_page * 16); 520 gp_page_buf[(data_size + meta->off0) / 4] = p_spare[0]; 521 gp_page_buf[(data_size + meta->off1) / 4] = p_spare[1]; 522 if (sec_per_page == 8) { 523 gp_page_buf[(data_size + meta->off2) / 4] = p_spare[2]; 524 gp_page_buf[(data_size + meta->off3) / 4] = p_spare[3]; 525 } 526 ret = sfc_nand_prog_page_raw(cs, addr, gp_page_buf); 527 528 return ret; 529 } 530 531 static u32 sfc_nand_read_page_raw(u8 cs, u32 addr, u32 *p_page_buf) 532 { 533 int ret; 534 u32 plane; 535 struct rk_sfc_op op; 536 u32 ecc_result; 537 u32 page_size = SFC_NAND_SECTOR_FULL_SIZE * p_nand_info->sec_per_page; 538 u8 status; 539 540 op.sfcmd.d32 = 0; 541 op.sfcmd.b.cmd = p_nand_info->page_read_cmd; 542 op.sfcmd.b.rw = SFC_WRITE; 543 op.sfcmd.b.addrbits = SFC_ADDR_24BITS; 544 545 op.sfctrl.d32 = 0; 546 547 sfc_request(&op, addr, p_page_buf, 0); 548 if (sfc_nand_dev.read_lines == DATA_LINES_X4 && 549 p_nand_info->feature & FEA_SOFT_QOP_BIT && 550 sfc_get_version() < SFC_VER_3) 551 sfc_nand_rw_preset(); 552 
553 sfc_nand_wait_busy(&status, 1000 * 1000); 554 if (p_nand_info->ecc_status) 555 ecc_result = p_nand_info->ecc_status(); 556 else 557 ecc_result = sfc_nand_ecc_status(); 558 559 op.sfcmd.d32 = 0; 560 op.sfcmd.b.cmd = sfc_nand_dev.page_read_cmd; 561 op.sfcmd.b.addrbits = SFC_ADDR_24BITS; 562 563 op.sfctrl.d32 = 0; 564 op.sfctrl.b.datalines = sfc_nand_dev.read_lines; 565 566 plane = p_nand_info->plane_per_die == 2 ? ((addr >> 6) & 0x1) << 12 : 0; 567 ret = sfc_request(&op, plane << 8, p_page_buf, page_size); 568 rkflash_print_dio("%s %x %x\n", __func__, addr, p_page_buf[0]); 569 570 if (ret != SFC_OK) 571 return SFC_NAND_ECC_ERROR; 572 573 return ecc_result; 574 } 575 576 u32 sfc_nand_read_page(u8 cs, u32 addr, u32 *p_data, u32 *p_spare) 577 { 578 u32 ret; 579 u32 sec_per_page = p_nand_info->sec_per_page; 580 u32 data_size = sec_per_page * SFC_NAND_SECTOR_SIZE; 581 struct nand_mega_area *meta = &p_nand_info->meta; 582 583 ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf); 584 memcpy(p_data, gp_page_buf, data_size); 585 p_spare[0] = gp_page_buf[(data_size + meta->off0) / 4]; 586 p_spare[1] = gp_page_buf[(data_size + meta->off1) / 4]; 587 if (p_nand_info->sec_per_page == 8) { 588 p_spare[2] = gp_page_buf[(data_size + meta->off2) / 4]; 589 p_spare[3] = gp_page_buf[(data_size + meta->off3) / 4]; 590 } 591 592 if (ret != SFC_NAND_ECC_OK) { 593 rkflash_print_error("%s[0x%x], ret=0x%x\n", __func__, addr, ret); 594 if (p_data) 595 rkflash_print_hex("data:", p_data, 4, 8); 596 if (p_spare) 597 rkflash_print_hex("spare:", p_spare, 4, 2); 598 } 599 600 return ret; 601 } 602 603 u32 sfc_nand_check_bad_block(u8 cs, u32 addr) 604 { 605 u32 ret; 606 u32 data_size = p_nand_info->sec_per_page * SFC_NAND_SECTOR_SIZE; 607 608 ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf); 609 if (ret == SFC_NAND_ECC_ERROR) 610 return true; 611 /* Original bad block */ 612 if ((gp_page_buf[data_size / 4] & 0xFF) != 0xFF) 613 return true; 614 615 return false; 616 } 617 618 u32 
sfc_nand_mark_bad_block(u8 cs, u32 addr) 619 { 620 u32 ret; 621 u32 data_size = p_nand_info->sec_per_page * SFC_NAND_SECTOR_SIZE; 622 623 ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf); 624 if (ret) 625 return SFC_NAND_HW_ERROR; 626 gp_page_buf[data_size / 4] = 0x0; 627 ret = sfc_nand_prog_page_raw(cs, addr, gp_page_buf); 628 if (ret) 629 return SFC_NAND_HW_ERROR; 630 631 return ret; 632 } 633 634 int sfc_nand_read_id(u8 *data) 635 { 636 int ret; 637 struct rk_sfc_op op; 638 639 op.sfcmd.d32 = 0; 640 op.sfcmd.b.cmd = CMD_READ_JEDECID; 641 op.sfcmd.b.addrbits = SFC_ADDR_XBITS; 642 643 op.sfctrl.d32 = 0; 644 op.sfctrl.b.addrbits = 8; 645 646 ret = sfc_request(&op, 0, data, 3); 647 648 return ret; 649 } 650 651 /* 652 * Read the 1st page's 1st byte of a phy_blk 653 * If not FF, it's bad blk 654 */ 655 static int sfc_nand_get_bad_block_list(u16 *table, u32 die) 656 { 657 u16 blk; 658 u32 bad_cnt, page; 659 u32 blk_per_die; 660 u32 *pread; 661 u32 *pspare_read; 662 663 rkflash_print_info("%s\n", __func__); 664 pread = ftl_malloc(SFC_NAND_PAGE_MAX_SIZE); 665 pspare_read = ftl_malloc(8); 666 if (!pread || !pspare_read) { 667 kfree(pread); 668 kfree(pspare_read); 669 return -1; 670 } 671 672 bad_cnt = 0; 673 blk_per_die = p_nand_info->plane_per_die * 674 p_nand_info->blk_per_plane; 675 for (blk = 0; blk < blk_per_die; blk++) { 676 page = (blk + blk_per_die * die) * 677 p_nand_info->page_per_blk; 678 sfc_nand_read_page(0, page, pread, pspare_read); 679 680 if (pread[0] != 0xFFFFFFFF || 681 pspare_read[0] != 0xFFFFFFFF) { 682 table[bad_cnt++] = blk; 683 rkflash_print_error("die[%d], bad_blk[%d]\n", die, blk); 684 } 685 } 686 ftl_free(pread); 687 ftl_free(pspare_read); 688 689 return (int)bad_cnt; 690 } 691 692 void sfc_nand_ftl_ops_init(void) 693 { 694 /* para init */ 695 g_nand_phy_info.nand_type = 1; 696 g_nand_phy_info.die_num = 1; 697 g_nand_phy_info.plane_per_die = p_nand_info->plane_per_die; 698 g_nand_phy_info.blk_per_plane = p_nand_info->blk_per_plane; 699 
g_nand_phy_info.page_per_blk = p_nand_info->page_per_blk; 700 g_nand_phy_info.page_per_slc_blk = p_nand_info->page_per_blk; 701 g_nand_phy_info.byte_per_sec = SFC_NAND_SECTOR_SIZE; 702 g_nand_phy_info.sec_per_page = p_nand_info->sec_per_page; 703 g_nand_phy_info.sec_per_blk = p_nand_info->sec_per_page * 704 p_nand_info->page_per_blk; 705 g_nand_phy_info.reserved_blk = 8; 706 g_nand_phy_info.blk_per_die = p_nand_info->plane_per_die * 707 p_nand_info->blk_per_plane; 708 g_nand_phy_info.ecc_bits = p_nand_info->max_ecc_bits; 709 710 /* driver register */ 711 g_nand_ops.get_bad_blk_list = sfc_nand_get_bad_block_list; 712 g_nand_ops.erase_blk = sfc_nand_erase_block; 713 g_nand_ops.prog_page = sfc_nand_prog_page; 714 g_nand_ops.read_page = sfc_nand_read_page; 715 g_nand_ops.bch_sel = NULL; 716 } 717 718 static int sfc_nand_enable_QE(void) 719 { 720 int ret = SFC_OK; 721 u8 status; 722 int bit_offset = p_nand_info->QE_bits; 723 724 if (bit_offset == 0xFF) 725 return SFC_OK; 726 727 ret = sfc_nand_read_feature(p_nand_info->QE_address, &status); 728 if (ret != SFC_OK) 729 return ret; 730 731 if (status & (1 << bit_offset)) /* is QE bit set */ 732 return SFC_OK; 733 734 status |= (1 << bit_offset); 735 return sfc_nand_write_feature(p_nand_info->QE_address, status); 736 737 return ret; 738 } 739 740 u32 sfc_nand_init(void) 741 { 742 u8 status, id_byte[8]; 743 744 sfc_nand_read_id(id_byte); 745 rkflash_print_error("sfc_nand id: %x %x %x\n", 746 id_byte[0], id_byte[1], id_byte[2]); 747 if (id_byte[0] == 0xFF || id_byte[0] == 0x00) 748 return (u32)FTL_NO_FLASH; 749 750 p_nand_info = sfc_nand_get_info(id_byte); 751 if (!p_nand_info) 752 return (u32)FTL_UNSUPPORTED_FLASH; 753 754 sfc_nand_dev.manufacturer = id_byte[0]; 755 sfc_nand_dev.mem_type = id_byte[1]; 756 sfc_nand_dev.capacity = p_nand_info->density; 757 sfc_nand_dev.block_size = p_nand_info->page_per_blk * p_nand_info->sec_per_page; 758 sfc_nand_dev.page_size = p_nand_info->sec_per_page; 759 760 /* disable block lock */ 761 
sfc_nand_write_feature(0xA0, 0); 762 sfc_nand_dev.read_lines = DATA_LINES_X1; 763 sfc_nand_dev.prog_lines = DATA_LINES_X1; 764 sfc_nand_dev.page_read_cmd = p_nand_info->read_cache_cmd_1; 765 sfc_nand_dev.page_prog_cmd = p_nand_info->prog_cache_cmd_1; 766 if (p_nand_info->feature & FEA_4BIT_READ) { 767 if (sfc_nand_enable_QE() == SFC_OK) { 768 sfc_nand_dev.read_lines = DATA_LINES_X4; 769 sfc_nand_dev.page_read_cmd = 770 p_nand_info->read_cache_cmd_4; 771 } 772 } 773 774 if (p_nand_info->feature & FEA_4BIT_PROG && 775 sfc_nand_dev.read_lines == DATA_LINES_X4) { 776 sfc_nand_dev.prog_lines = DATA_LINES_X4; 777 sfc_nand_dev.page_prog_cmd = p_nand_info->prog_cache_cmd_4; 778 } 779 780 sfc_nand_read_feature(0xA0, &status); 781 rkflash_print_info("sfc_nand A0 = 0x%x\n", status); 782 sfc_nand_read_feature(0xB0, &status); 783 rkflash_print_info("sfc_nand B0 = 0x%x\n", status); 784 rkflash_print_info("read_lines = %x\n", sfc_nand_dev.read_lines); 785 rkflash_print_info("prog_lines = %x\n", sfc_nand_dev.prog_lines); 786 rkflash_print_info("page_read_cmd = %x\n", sfc_nand_dev.page_read_cmd); 787 rkflash_print_info("page_prog_cmd = %x\n", sfc_nand_dev.page_prog_cmd); 788 789 return SFC_OK; 790 } 791 792 void sfc_nand_deinit(void) 793 { 794 /* to-do */ 795 } 796 797 struct SFNAND_DEV *sfc_nand_get_private_dev(void) 798 { 799 return &sfc_nand_dev; 800 } 801 802