/*
 * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <linux/bug.h>
#include <linux/delay.h>

#include "flash_com.h"
#include "rkflash_debug.h"
#include "rk_sftl.h"
#include "sfc.h"
#include "sfc_nand.h"

/*
 * Parameter table for every supported SPI NAND chip, keyed by the first
 * two ID bytes (see sfc_nand_get_info()).  The last two initializers of
 * each entry are the spare-area ("meta") offsets and an optional
 * chip-specific ECC status decoder; a NULL decoder selects the default
 * sfc_nand_ecc_status().
 *
 * NOTE(review): field order inferred from how this file uses the struct
 * (id, sec_per_page, page_per_blk, plane_per_die, blk_per_plane,
 * page_read_cmd, page_prog_cmd, read_cache_cmd_1, prog_cache_cmd_1,
 * read_cache_cmd_4, prog_cache_cmd_4, block_erase_cmd, feature,
 * density, max_ecc_bits, QE_address, QE_bits, meta, ecc_status) --
 * confirm against struct nand_info in sfc_nand.h.
 */
static struct nand_info spi_nand_tbl[] = {
	/* TC58CVG0S0HxAIx */
	{0x98C2, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x02, 0xD8, 0x00, 18, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* TC58CVG1S0HxAIx */
	{0x98CB, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x02, 0xD8, 0x00, 19, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* MX35LF1GE4AB */
	{0xC212, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 4, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* MX35LF2GE4AB */
	{0xC222, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 4, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* GD5F1GQ4UAYIG */
	{0xC8F1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, NULL},
	/* MT29F1G01ZAC */
	{0x2C12, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x00, 18, 1, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* GD5F2GQ40BY2GR */
	{0xC8D2, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp3},
	/* GD5F1GQ4RB9IGR */
	{0xC8D1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp3},
	/* IS37SML01G1 */
	{0xC821, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x00, 18, 1, 0xFF, 0xFF, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* W25N01GV */
	{0xEFAA, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xFF, 0xFF, {4, 20, 36, 0xff}, &sfc_nand_ecc_status_sp1},
	/* HYF2GQ4UAACAE */
	{0xC952, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 14, 0xB0, 0, {4, 36, 0xff, 0xff}, NULL},
	/* HYF2GQ4UDACAE */
	{0xC922, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, NULL},
	/* HYF2GQ4UHCCAE */
	{0xC95A, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 14, 0xB0, 0, {4, 36, 0xff, 0xff}, NULL},
	/* HYF1GQ4UDACAE */
	{0xC921, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, NULL},
	/* F50L1G41LB */
	{0xC801, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xFF, 0xFF, {20, 36, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* XT26G02A */
	{0x0BE2, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* XT26G01A */
	{0x0BE1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* FS35ND01G-S1 */
	{0xCDB1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 4, 0xB0, 0x0, {16, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp5},
	/* FS35ND02G-S2 */
	{0xCDA2, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x03, 0x02, 0xD8, 0x00, 19, 4, 0xFF, 0xFF, {16, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp5},
	/* DS35Q1GA-1B */
	{0xE571, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 4, 0xB0, 0x0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* DS35Q2GA-1B */
	{0xE572, 4, 64, 2, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 4, 0xB0, 0x0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* EM73C044SNC-G */
	{0xD522, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0x0, {4, 20, 0xff, 0xff}, NULL},
	/* EM73D044SNB-G */
	{0xD520, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 8, 0xB0, 0x0, {4, 20, 0xff, 0xff}, NULL},
	/* ATO25D1GA */
	{0x9B12, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x40, 18, 1, 0xB0, 0x0, {20, 36, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* XT26G02B */
	{0x0BF2, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp5},
	/* XT26G01B */
	{0x0BF1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* HYF4GQ4UAACBE */
	{0xC9D4, 8, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 20, 4, 0xB0, 0, {32, 64, 36, 68}, NULL},
	/* FM25S01 */
	{0xA1A1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 1, 0xB0, 0, {0, 4, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* HYF1GQ4UPACAE */
	{0xC9A1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp1},
	/* EM73E044SNA-G */
	{0xD503, 8, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 20, 8, 0xB0, 0, {4, 40, 8, 44}, NULL},
	/* GD5F2GQ5UEYIG */
	{0xC852, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 19, 4, 0xB0, 0, {4, 20, 0xff, 0xff}, &sfc_nand_ecc_status_sp2},
	/* GD5F1GQ4R */
	{0xC8C1, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xB0, 0, {4, 8, 0xff, 0xff}, &sfc_nand_ecc_status_sp3},
	/* TC58CVG2S0HRAIJ */
	{0x98ED, 8, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 20, 8, 0xFF, 0xFF, {4, 12, 8, 16}, NULL},
	/* TC58CVG1S3HRAIJ */
	{0x98EB, 4, 64, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 19, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* TC58CVG0S3HRAIJ */
	{0x98E2, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x0C, 18, 8, 0xFF, 0xFF, {4, 8, 0xff, 0xff}, NULL},
	/* XT26G04A */
	{0x0BE3, 4, 128, 1, 2048, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 20, 1, 0xB0, 0x0, {8, 12, 0xff, 0xff}, &sfc_nand_ecc_status_sp4},
	/* FS35ND01G-S1Y2 */
	{0xCDEA, 4, 64, 1, 1024, 0x13, 0x10, 0x03, 0x02, 0x6B, 0x32, 0xD8, 0x4C, 18, 4, 0xFF, 0xFF, {4, 8, 12, 16}, &sfc_nand_ecc_status_sp1},
};

/* Parameters of the chip detected by sfc_nand_init(); NULL until then. */
static struct nand_info *p_nand_info;
/* Shared bounce buffer holding one full page (data + spare), word-wide. */
static u32 gp_page_buf[SFC_NAND_PAGE_MAX_SIZE / 4];
/* Runtime state (ID bytes, chosen commands, line widths) for the chip. */
static struct SFNAND_DEV sfc_nand_dev;

/*
 * Look up chip parameters by ID.
 * @nand_id: at least two ID bytes as returned by sfc_nand_read_id().
 * Returns the matching table entry, or NULL if the chip is unknown.
 */
static struct nand_info *sfc_nand_get_info(u8 *nand_id)
{
	u32 i;
	/* Key is (manufacturer byte << 8) | device byte. */
	u32 id = (nand_id[0] << 8) | (nand_id[1] << 0);

	for (i = 0; i < ARRAY_SIZE(spi_nand_tbl); i++) {
		if (spi_nand_tbl[i].id == id)
			return &spi_nand_tbl[i];
	}
	return NULL;
}

/* Issue WRITE ENABLE; required before any program/erase/set-feature op. */
static int sfc_nand_write_en(void)
{
	int ret;
	struct rk_sfc_op op;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = CMD_WRITE_EN;

	op.sfctrl.d32 = 0;

	ret = sfc_request(&op, 0, NULL, 0);
	return ret;
}

/*
 * Write a single 0xFF byte with opcode 0 on two data lines.
 * NOTE(review): called before x4 transfers on controllers older than
 * SFC_VER_3 (see the prog/read paths) -- presumably a controller
 * quad-operation preset quirk; confirm against the SFC controller TRM.
 */
static int sfc_nand_rw_preset(void)
{
	int ret;
	struct rk_sfc_op op;
	u8 status = 0xFF;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = 0;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.datalines = 2;

	ret = sfc_request(&op, 0, &status, 1);
	return ret;
}

/*
 * Get Features (opcode 0x0F): read one feature register.
 * @addr: feature register address (e.g. 0xA0 lock, 0xB0 config, 0xC0 status).
 * @data: out; zeroed first so a failed transfer leaves a defined value.
 */
static int sfc_nand_read_feature(u8 addr, u8 *data)
{
	int ret;
	struct rk_sfc_op op;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = 0x0F;
	op.sfcmd.b.addrbits = SFC_ADDR_XBITS;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.addrbits = 8;

	*data = 0;

	ret = sfc_request(&op, addr, data, 1);
	if (ret != SFC_OK)
		return ret;
	return SFC_OK;
}

/*
 * Set Features (opcode 0x1F): write one feature register.
 * Issues WRITE ENABLE first; returns the sfc_request() status.
 */
static int sfc_nand_write_feature(u32 addr, u8 status)
{
	int ret;
	struct rk_sfc_op op;

	sfc_nand_write_en();

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = 0x1F;
	op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.addrbits = 8;

	ret = sfc_request(&op, addr, &status, 1);
	if (ret != SFC_OK)
		return ret;
	return ret;
}

/*
 * Poll status register 0xC0 until the busy bit (bit 0, OIP) clears.
 * @data:    out; last status value read (0 if the first read fails).
 * @timeout: maximum polls, 1 us apart.
 * Returns SFC_OK when idle, a read error code, or -1 on timeout.
 */
static int sfc_nand_wait_busy(u8 *data, int timeout)
{
	int ret;
	int i;
	u8 status;

	*data = 0;
	for (i = 0; i < timeout; i++) {
		ret = sfc_nand_read_feature(0xC0, &status);
		if (ret != SFC_OK)
			return ret;
		*data = status;
		if (!(status & (1 << 0)))
			return SFC_OK;
		sfc_delay(1);
	}
	return -1;
}

/*
 * ecc default:
 * ecc bits: 0xC0[4,5]
 * 0x00, No bit errors were detected
 * 0x01, Bit errors were detected and corrected.
 * 0x10, Multiple bit errors were detected and not corrected.
 * 0x11, Bits errors were detected and corrected, bit error count
 * exceed the bit flip detection threshold
 */
static u32 sfc_nand_ecc_status(void)
{
	u32 ret;
	u32 i;
	u8 ecc;
	u8 status;
	u32 timeout = 1000 * 1000;

	/* Wait for the device to go idle; ECC bits are valid only then. */
	for (i = 0; i < timeout; i++) {
		ret = sfc_nand_read_feature(0xC0, &status);
		if (ret != SFC_OK)
			return SFC_NAND_ECC_ERROR;
		if (!(status & (1 << 0)))
			break;
		sfc_delay(1);
	}

	ecc = (status >> 4) & 0x03;

	if (ecc <= 1)
		ret = SFC_NAND_ECC_OK;
	else if (ecc == 2)
		ret = (u32)SFC_NAND_ECC_ERROR;
	else
		ret = SFC_NAND_ECC_REFRESH;

	return ret;
}

/*
 * ecc spectial type1:
 * ecc bits: 0xC0[4,5]
 * 0x00, No bit errors were detected;
 * 0x01, Bits errors were detected and corrected, bit error count
 * may reach the bit flip detection threshold;
 * 0x10, Multiple bit errors were detected and not corrected;
 * 0x11, Reserved.
247 */ 248 u32 sfc_nand_ecc_status_sp1(void) 249 { 250 u32 ret; 251 u32 i; 252 u8 ecc; 253 u8 status; 254 u32 timeout = 1000 * 1000; 255 256 for (i = 0; i < timeout; i++) { 257 ret = sfc_nand_read_feature(0xC0, &status); 258 if (ret != SFC_OK) 259 return SFC_NAND_ECC_ERROR; 260 if (!(status & (1 << 0))) 261 break; 262 sfc_delay(1); 263 } 264 265 ecc = (status >> 4) & 0x03; 266 267 if (ecc == 0) 268 ret = SFC_NAND_ECC_OK; 269 else if (ecc == 1) 270 ret = SFC_NAND_ECC_REFRESH; 271 else 272 ret = (u32)SFC_NAND_ECC_ERROR; 273 274 return ret; 275 } 276 277 /* 278 * ecc spectial type2: 279 * ecc bits: 0xC0[4,5] 0xF0[4,5] 280 * [0x0000, 0x0011], No bit errors were detected; 281 * [0x0100, 0x0111], Bit errors were detected and corrected. Not 282 * reach Flipping Bits; 283 * [0x1000, 0x1011], Multiple bit errors were detected and 284 * not corrected. 285 * [0x1100, 0x1111], reserved. 286 */ 287 u32 sfc_nand_ecc_status_sp2(void) 288 { 289 u32 ret; 290 u32 i; 291 u8 ecc; 292 u8 status, status1; 293 u32 timeout = 1000 * 1000; 294 295 for (i = 0; i < timeout; i++) { 296 ret = sfc_nand_read_feature(0xC0, &status); 297 if (ret != SFC_OK) 298 return SFC_NAND_ECC_ERROR; 299 ret = sfc_nand_read_feature(0xF0, &status1); 300 if (ret != SFC_OK) 301 return SFC_NAND_ECC_ERROR; 302 if (!(status & (1 << 0))) 303 break; 304 sfc_delay(1); 305 } 306 307 ecc = (status >> 4) & 0x03; 308 ecc = (ecc << 2) | ((status1 >> 4) & 0x03); 309 if (ecc < 7) 310 ret = SFC_NAND_ECC_OK; 311 else if (ecc == 7) 312 ret = SFC_NAND_ECC_REFRESH; 313 else 314 ret = (u32)SFC_NAND_ECC_ERROR; 315 316 return ret; 317 } 318 319 /* 320 * ecc spectial type3: 321 * ecc bits: 0xC0[4,5] 0xF0[4,5] 322 * [0x0000, 0x0011], No bit errors were detected; 323 * [0x0100, 0x0111], Bit errors were detected and corrected. Not 324 * reach Flipping Bits; 325 * [0x1000, 0x1011], Multiple bit errors were detected and 326 * not corrected. 
327 * [0x1100, 0x1111], Bit error count equals the bit flip 328 * detectio nthreshold 329 */ 330 u32 sfc_nand_ecc_status_sp3(void) 331 { 332 u32 ret; 333 u32 i; 334 u8 ecc; 335 u8 status, status1; 336 u32 timeout = 1000 * 1000; 337 338 for (i = 0; i < timeout; i++) { 339 ret = sfc_nand_read_feature(0xC0, &status); 340 if (ret != SFC_OK) 341 return SFC_NAND_ECC_ERROR; 342 ret = sfc_nand_read_feature(0xF0, &status1); 343 if (ret != SFC_OK) 344 return SFC_NAND_ECC_ERROR; 345 if (!(status & (1 << 0))) 346 break; 347 sfc_delay(1); 348 } 349 350 ecc = (status >> 4) & 0x03; 351 ecc = (ecc << 2) | ((status1 >> 4) & 0x03); 352 if (ecc < 7) 353 ret = SFC_NAND_ECC_OK; 354 else if (ecc == 7 || ecc >= 12) 355 ret = SFC_NAND_ECC_REFRESH; 356 else 357 ret = (u32)SFC_NAND_ECC_ERROR; 358 359 return ret; 360 } 361 362 /* 363 * ecc spectial type4: 364 * ecc bits: 0xC0[2,5] 365 * [0x0000], No bit errors were detected; 366 * [0x0001, 0x0111], Bit errors were detected and corrected. Not 367 * reach Flipping Bits; 368 * [0x1000], Multiple bit errors were detected and 369 * not corrected. 370 * [0x1100], Bit error count equals the bit flip 371 * detectionthreshold 372 * else, reserved 373 */ 374 u32 sfc_nand_ecc_status_sp4(void) 375 { 376 u32 ret; 377 u32 i; 378 u8 ecc; 379 u8 status; 380 u32 timeout = 1000 * 1000; 381 382 for (i = 0; i < timeout; i++) { 383 ret = sfc_nand_read_feature(0xC0, &status); 384 if (ret != SFC_OK) 385 return SFC_NAND_ECC_ERROR; 386 if (!(status & (1 << 0))) 387 break; 388 sfc_delay(1); 389 } 390 391 ecc = (status >> 2) & 0x0f; 392 if (ecc < 7) 393 ret = SFC_NAND_ECC_OK; 394 else if (ecc == 7 || ecc == 12) 395 ret = SFC_NAND_ECC_REFRESH; 396 else 397 ret = (u32)SFC_NAND_ECC_ERROR; 398 399 return ret; 400 } 401 402 /* 403 * ecc spectial type5: 404 * ecc bits: 0xC0[4,6] 405 * [0x0], No bit errors were detected; 406 * [0x001, 0x011], Bit errors were detected and corrected. 
 Not
 * reach Flipping Bits;
 * [0x100], Bit error count equals the bit flip
 * detectionthreshold
 * [0x101, 0x110], Reserved;
 * [0x111], Multiple bit errors were detected and
 * not corrected.
 */
u32 sfc_nand_ecc_status_sp5(void)
{
	u32 ret;
	u32 i;
	u8 ecc;
	u8 status;
	u32 timeout = 1000 * 1000;

	/* Wait for the device to go idle; ECC bits are valid only then. */
	for (i = 0; i < timeout; i++) {
		ret = sfc_nand_read_feature(0xC0, &status);
		if (ret != SFC_OK)
			return SFC_NAND_ECC_ERROR;
		if (!(status & (1 << 0)))
			break;
		sfc_delay(1);
	}

	/* 3-bit ECC field, status bits [6:4]. */
	ecc = (status >> 4) & 0x07;
	if (ecc < 4)
		ret = SFC_NAND_ECC_OK;
	else if (ecc == 4)
		ret = SFC_NAND_ECC_REFRESH;
	else
		ret = (u32)SFC_NAND_ECC_ERROR;

	return ret;
}

/*
 * Erase one block.
 * @cs:   chip select (unused by the current transport).
 * @addr: row address of the block (in pages).
 * Returns SFC_OK, a transport error, or SFC_NAND_PROG_ERASE_ERROR when
 * the chip reports the erase-fail bit (status 0xC0 bit 2).
 */
u32 sfc_nand_erase_block(u8 cs, u32 addr)
{
	int ret;
	struct rk_sfc_op op;
	u8 status;

	rkflash_print_dio("%s %x\n", __func__, addr);
	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = p_nand_info->block_erase_cmd;
	op.sfcmd.b.addrbits = SFC_ADDR_24BITS;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;

	sfc_nand_write_en();
	ret = sfc_request(&op, addr, NULL, 0);
	if (ret != SFC_OK)
		return ret;
	ret = sfc_nand_wait_busy(&status, 1000 * 1000);
	/* Bit 2 of status 0xC0 is the erase-fail flag. */
	if (status & (1 << 2))
		return SFC_NAND_PROG_ERASE_ERROR;

	return ret;
}

/*
 * Program one full page (data + spare) from @p_page_buf.
 * Two-phase sequence: program-load into the cache register, then
 * program-execute at @addr.  Bit 3 of status 0xC0 is the program-fail
 * flag.  For 2-plane chips the plane select bit is derived from the
 * block number (addr >> 6) and placed at bit 12 of the column address.
 */
static u32 sfc_nand_prog_page_raw(u8 cs, u32 addr, u32 *p_page_buf)
{
	int ret;
	u32 plane;
	struct rk_sfc_op op;
	u8 status;
	u32 page_size = SFC_NAND_SECTOR_FULL_SIZE * p_nand_info->sec_per_page;

	rkflash_print_dio("%s %x %x\n", __func__, addr, p_page_buf[0]);
	sfc_nand_write_en();
	/* Old controllers need a preset before x4 writes on QOP chips. */
	if (sfc_nand_dev.prog_lines == DATA_LINES_X4 &&
	    p_nand_info->feature & FEA_SOFT_QOP_BIT &&
	    sfc_get_version() < SFC_VER_3)
		sfc_nand_rw_preset();

	/* Phase 1: load the page into the chip's cache register. */
	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = sfc_nand_dev.page_prog_cmd;
	op.sfcmd.b.addrbits = SFC_ADDR_XBITS;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.datalines = sfc_nand_dev.prog_lines;
	op.sfctrl.b.addrbits = 16;
	plane = p_nand_info->plane_per_die == 2 ? ((addr >> 6) & 0x1) << 12 : 0;
	sfc_request(&op, plane, p_page_buf, page_size);

	/* Phase 2: program-execute the cache contents into the array. */
	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = p_nand_info->page_prog_cmd;
	op.sfcmd.b.addrbits = SFC_ADDR_24BITS;
	op.sfcmd.b.rw = SFC_WRITE;

	op.sfctrl.d32 = 0;
	ret = sfc_request(&op, addr, p_page_buf, 0);
	if (ret != SFC_OK)
		return ret;
	ret = sfc_nand_wait_busy(&status, 1000 * 1000);
	if (status & (1 << 3))
		return SFC_NAND_PROG_ERASE_ERROR;

	return ret;
}

/*
 * Program a page from separate data and spare buffers.
 * Assembles data + 0xFF-filled spare area into gp_page_buf, placing the
 * spare words at the chip-specific meta offsets, then programs the page.
 * @p_spare: 2 words for 4-sector pages, 4 words for 8-sector pages.
 */
u32 sfc_nand_prog_page(u8 cs, u32 addr, u32 *p_data, u32 *p_spare)
{
	int ret;
	u32 sec_per_page = p_nand_info->sec_per_page;
	u32 data_size = sec_per_page * SFC_NAND_SECTOR_SIZE;
	struct nand_mega_area *meta = &p_nand_info->meta;

	memcpy(gp_page_buf, p_data, data_size);
	/* 16 spare bytes per sector, defaulted to erased (0xFF). */
	memset(&gp_page_buf[data_size / 4], 0xff, sec_per_page * 16);
	gp_page_buf[(data_size + meta->off0) / 4] = p_spare[0];
	gp_page_buf[(data_size + meta->off1) / 4] = p_spare[1];
	if (sec_per_page == 8) {
		gp_page_buf[(data_size + meta->off2) / 4] = p_spare[2];
		gp_page_buf[(data_size + meta->off3) / 4] = p_spare[3];
	}
	ret = sfc_nand_prog_page_raw(cs, addr, gp_page_buf);

	return ret;
}

/*
 * Read one full page (data + spare) into @p_page_buf.
 * Sequence: page-read-to-cache at @addr, wait busy, decode ECC status
 * (chip-specific decoder if present, default otherwise), then read the
 * cache register out.  Returns the ECC result, or SFC_NAND_ECC_ERROR on
 * transport failure of the final read.
 */
static u32 sfc_nand_read_page_raw(u8 cs, u32 addr, u32 *p_page_buf)
{
	int ret;
	u32 plane;
	struct rk_sfc_op op;
	u32 ecc_result;
	u32 page_size = SFC_NAND_SECTOR_FULL_SIZE * p_nand_info->sec_per_page;
	u8 status;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = p_nand_info->page_read_cmd;
	op.sfcmd.b.rw = SFC_WRITE;
	op.sfcmd.b.addrbits = SFC_ADDR_24BITS;

	op.sfctrl.d32 = 0;

	sfc_request(&op, addr, p_page_buf, 0);
	/* Old controllers need a preset before x4 reads on QOP chips. */
	if (sfc_nand_dev.read_lines == DATA_LINES_X4 &&
	    p_nand_info->feature & FEA_SOFT_QOP_BIT &&
	    sfc_get_version() < SFC_VER_3)
		sfc_nand_rw_preset();

	sfc_nand_wait_busy(&status, 1000 * 1000);
	if (p_nand_info->ecc_status)
		ecc_result = p_nand_info->ecc_status();
	else
		ecc_result = sfc_nand_ecc_status();

	/* Read the page out of the cache register. */
	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = sfc_nand_dev.page_read_cmd;
	op.sfcmd.b.addrbits = SFC_ADDR_24BITS;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.datalines = sfc_nand_dev.read_lines;

	plane = p_nand_info->plane_per_die == 2 ? ((addr >> 6) & 0x1) << 12 : 0;
	ret = sfc_request(&op, plane << 8, p_page_buf, page_size);
	rkflash_print_dio("%s %x %x\n", __func__, addr, p_page_buf[0]);

	if (ret != SFC_OK)
		return SFC_NAND_ECC_ERROR;

	return ecc_result;
}

/*
 * Read a page into separate data and spare buffers.
 * Extracts the spare words from the chip-specific meta offsets.  On any
 * non-OK ECC result the page contents are still copied out and a hexdump
 * is logged.  Returns the ECC result from the raw read.
 */
u32 sfc_nand_read_page(u8 cs, u32 addr, u32 *p_data, u32 *p_spare)
{
	u32 ret;
	u32 sec_per_page = p_nand_info->sec_per_page;
	u32 data_size = sec_per_page * SFC_NAND_SECTOR_SIZE;
	struct nand_mega_area *meta = &p_nand_info->meta;

	ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf);
	memcpy(p_data, gp_page_buf, data_size);
	p_spare[0] = gp_page_buf[(data_size + meta->off0) / 4];
	p_spare[1] = gp_page_buf[(data_size + meta->off1) / 4];
	if (p_nand_info->sec_per_page == 8) {
		p_spare[2] = gp_page_buf[(data_size + meta->off2) / 4];
		p_spare[3] = gp_page_buf[(data_size + meta->off3) / 4];
	}

	if (ret != SFC_NAND_ECC_OK) {
		rkflash_print_error("%s[0x%x], ret=0x%x\n", __func__, addr, ret);
		if (p_data)
			rkflash_print_hex("data:", p_data, 4, 8);
		if (p_spare)
			rkflash_print_hex("spare:", p_spare, 4, 2);
	}

	return ret;
}

/*
 * Bad-block test for the page at @addr: true (bad) when the page is
 * uncorrectable or its first spare byte is not 0xFF (factory marker).
 */
u32 sfc_nand_check_bad_block(u8 cs, u32 addr)
{
	u32 ret;
	u32 data_size = p_nand_info->sec_per_page * SFC_NAND_SECTOR_SIZE;

	ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf);
	if (ret == SFC_NAND_ECC_ERROR)
		return true;
	/* Original bad block */
	if ((gp_page_buf[data_size / 4] & 0xFF) != 0xFF)
		return true;

	return false;
}

/*
 * Mark the block containing @addr bad by zeroing the first spare word
 * of the page (read-modify-write of the whole page).
 * Returns 0 on success or SFC_NAND_HW_ERROR.
 */
u32 sfc_nand_mark_bad_block(u8 cs, u32 addr)
{
	u32 ret;
	u32 data_size = p_nand_info->sec_per_page * SFC_NAND_SECTOR_SIZE;

	ret = sfc_nand_read_page_raw(cs, addr, gp_page_buf);
	if (ret)
		return SFC_NAND_HW_ERROR;
	gp_page_buf[data_size / 4] = 0x0;
	ret = sfc_nand_prog_page_raw(cs, addr, gp_page_buf);
	if (ret)
		return SFC_NAND_HW_ERROR;

	return ret;
}

/*
 * Read 3 JEDEC ID bytes into @data (manufacturer, device, ...).
 * @data must have room for at least 3 bytes.
 */
int sfc_nand_read_id(u8 *data)
{
	int ret;
	struct rk_sfc_op op;

	op.sfcmd.d32 = 0;
	op.sfcmd.b.cmd = CMD_READ_JEDECID;
	op.sfcmd.b.addrbits = SFC_ADDR_XBITS;

	op.sfctrl.d32 = 0;
	op.sfctrl.b.addrbits = 8;

	ret = sfc_request(&op, 0, data, 3);

	return ret;
}

/*
 * Read the 1st page's 1st byte of a phy_blk
 * If not FF, it's bad blk
 *
 * Scans every block of @die and records bad block numbers into @table.
 * Returns the number of bad blocks found, or -1 on allocation failure.
 *
 * NOTE(review): pspare_read is only 8 bytes, but sfc_nand_read_page()
 * writes p_spare[2]/p_spare[3] (16 bytes) for chips with 8 sectors per
 * page -- looks like a buffer overflow for those chips; confirm.
 * NOTE(review): failure path frees with kfree() while the success path
 * uses ftl_free() -- presumably these are equivalent here; verify.
 */
static int sfc_nand_get_bad_block_list(u16 *table, u32 die)
{
	u16 blk;
	u32 bad_cnt, page;
	u32 blk_per_die;
	u32 *pread;
	u32 *pspare_read;

	rkflash_print_info("%s\n", __func__);
	pread = ftl_malloc(SFC_NAND_PAGE_MAX_SIZE);
	pspare_read = ftl_malloc(8);
	if (!pread || !pspare_read) {
		kfree(pread);
		kfree(pspare_read);
		return -1;
	}

	bad_cnt = 0;
	blk_per_die = p_nand_info->plane_per_die *
		      p_nand_info->blk_per_plane;
	for (blk = 0; blk < blk_per_die; blk++) {
		page = (blk + blk_per_die * die) *
		       p_nand_info->page_per_blk;
		sfc_nand_read_page(0, page, pread, pspare_read);

		/* Bad if either the first data or first spare word != FF. */
		if (pread[0] != 0xFFFFFFFF ||
		    pspare_read[0] != 0xFFFFFFFF) {
			table[bad_cnt++] = blk;
			rkflash_print_error("die[%d], bad_blk[%d]\n", die, blk);
		}
	}
	ftl_free(pread);
	ftl_free(pspare_read);

	return (int)bad_cnt;
}

/*
 * Publish chip geometry and driver callbacks to the FTL layer via the
 * global g_nand_phy_info / g_nand_ops.  Requires p_nand_info to be set
 * by a successful sfc_nand_init().
 */
void sfc_nand_ftl_ops_init(void)
{
	/* para init */
	g_nand_phy_info.nand_type = 1;
	g_nand_phy_info.die_num = 1;
	g_nand_phy_info.plane_per_die = p_nand_info->plane_per_die;
	g_nand_phy_info.blk_per_plane = p_nand_info->blk_per_plane;
	g_nand_phy_info.page_per_blk = p_nand_info->page_per_blk;
	g_nand_phy_info.page_per_slc_blk = p_nand_info->page_per_blk;
	g_nand_phy_info.byte_per_sec = SFC_NAND_SECTOR_SIZE;
	g_nand_phy_info.sec_per_page = p_nand_info->sec_per_page;
	g_nand_phy_info.sec_per_blk = p_nand_info->sec_per_page *
				      p_nand_info->page_per_blk;
	g_nand_phy_info.reserved_blk = 8;
	g_nand_phy_info.blk_per_die = p_nand_info->plane_per_die *
				      p_nand_info->blk_per_plane;
	g_nand_phy_info.ecc_bits = p_nand_info->max_ecc_bits;

	/* driver register */
	g_nand_ops.get_bad_blk_list = sfc_nand_get_bad_block_list;
	g_nand_ops.erase_blk = sfc_nand_erase_block;
	g_nand_ops.prog_page = sfc_nand_prog_page;
	g_nand_ops.read_page = sfc_nand_read_page;
	g_nand_ops.bch_sel = NULL;
}

/*
 * Set the chip's Quad Enable bit if it is not already set.
 * QE_bits == 0xFF in the table means no QE bit is needed for this chip.
 * Returns SFC_OK or a feature read/write error.
 */
static int sfc_nand_enable_QE(void)
{
	int ret = SFC_OK;
	u8 status;
	int bit_offset = p_nand_info->QE_bits;

	if (bit_offset == 0xFF)
		return SFC_OK;

	ret = sfc_nand_read_feature(p_nand_info->QE_address, &status);
	if (ret != SFC_OK)
		return ret;

	if (status & (1 << bit_offset)) /* is QE bit set */
		return SFC_OK;

	status |= (1 << bit_offset);
	return sfc_nand_write_feature(p_nand_info->QE_address, status);

	/* NOTE(review): unreachable -- dead code after the return above. */
	return ret;
}

/*
 * Probe and configure the SPI NAND chip.
 * Reads the ID, looks up chip parameters, disables block lock, and
 * selects x1 or x4 read/program commands depending on chip features and
 * whether Quad Enable could be set.  block_size/page_size are stored in
 * sectors (512-byte units), not bytes.
 * Returns SFC_OK, FTL_NO_FLASH, or FTL_UNSUPPORTED_FLASH.
 */
u32 sfc_nand_init(void)
{
	u8 status, id_byte[8];

	sfc_nand_read_id(id_byte);
	rkflash_print_error("sfc_nand id: %x %x %x\n",
			    id_byte[0], id_byte[1], id_byte[2]);
	/* All-0 or all-1 first ID byte means no device is answering. */
	if (id_byte[0] == 0xFF || id_byte[0] == 0x00)
		return (u32)FTL_NO_FLASH;

	p_nand_info = sfc_nand_get_info(id_byte);
	if (!p_nand_info)
		return (u32)FTL_UNSUPPORTED_FLASH;

	sfc_nand_dev.manufacturer = id_byte[0];
	sfc_nand_dev.mem_type = id_byte[1];
	sfc_nand_dev.capacity = p_nand_info->density;
	sfc_nand_dev.block_size = p_nand_info->page_per_blk * p_nand_info->sec_per_page;
	sfc_nand_dev.page_size = p_nand_info->sec_per_page;

	/* disable block lock */
	sfc_nand_write_feature(0xA0, 0);
	sfc_nand_dev.read_lines = DATA_LINES_X1;
	sfc_nand_dev.prog_lines = DATA_LINES_X1;
	sfc_nand_dev.page_read_cmd = p_nand_info->read_cache_cmd_1;
	sfc_nand_dev.page_prog_cmd = p_nand_info->prog_cache_cmd_1;
	if (p_nand_info->feature & FEA_4BIT_READ) {
		if (sfc_nand_enable_QE() == SFC_OK) {
			sfc_nand_dev.read_lines = DATA_LINES_X4;
			sfc_nand_dev.page_read_cmd =
				p_nand_info->read_cache_cmd_4;
		}
	}

	/* x4 program is only used when x4 read was enabled successfully. */
	if (p_nand_info->feature & FEA_4BIT_PROG &&
	    sfc_nand_dev.read_lines == DATA_LINES_X4) {
		sfc_nand_dev.prog_lines = DATA_LINES_X4;
		sfc_nand_dev.page_prog_cmd = p_nand_info->prog_cache_cmd_4;
	}

	sfc_nand_read_feature(0xA0, &status);
	rkflash_print_info("sfc_nand A0 = 0x%x\n", status);
	sfc_nand_read_feature(0xB0, &status);
	rkflash_print_info("sfc_nand B0 = 0x%x\n", status);
	rkflash_print_info("read_lines = %x\n", sfc_nand_dev.read_lines);
	rkflash_print_info("prog_lines = %x\n", sfc_nand_dev.prog_lines);
	rkflash_print_info("page_read_cmd = %x\n", sfc_nand_dev.page_read_cmd);
	rkflash_print_info("page_prog_cmd = %x\n", sfc_nand_dev.page_prog_cmd);

	return SFC_OK;
}

/* Teardown hook; nothing to release yet. */
void sfc_nand_deinit(void)
{
	/* to-do */
}

/* Accessor for the module-private device state. */
struct SFNAND_DEV *sfc_nand_get_private_dev(void)
{
	return &sfc_nand_dev;
}