/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <dm/device-internal.h>

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

/*
 * Per-erase-block remap table: indexed by logical erase block, each entry
 * holds the physical erase block to use instead (bad blocks are skipped),
 * or one of the MTD_BLK_TABLE_BLOCK_* markers.
 */
static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND) && !defined(CONFIG_SPL_BUILD)
		mtd = desc->bdev->priv;
#endif
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = malloc(blk_total * sizeof(*mtd_map_blk_table));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = (u32)((offset & mtd->erasesize_mask) + length) >>
		  mtd->erasesize_shift;
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* Table already initialized for this range */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	/*
	 * For every logical block in the range, record the next good
	 * physical block; the search must not run past blk_cnt. See the
	 * worked example after mtd_blk_map_partitions().
	 */
	j = 0;
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			printf("mtd block map table init failed\n");
		}
	}
}
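/*
 * Worked example of the remap table above (hypothetical bad-block layout,
 * for illustration only): for a partition that spans erase blocks 8..11
 * where block 9 is marked bad, mtd_blk_map_table_init() produces
 *
 *   mtd_map_blk_table[8]  = 8
 *   mtd_map_blk_table[9]  = 10
 *   mtd_map_blk_table[10] = 11
 *   mtd_map_blk_table[11] = MTD_BLK_TABLE_BLOCK_SHIFT
 *
 * so get_mtd_blk_map_address() redirects an access to logical block 9 to
 * physical block 10, and mtd_map_read() below never touches the bad block.
 */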
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	u32 erasesize = mtd->erasesize;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (erasesize - 1);
		size_t read_length;

		if (offset >= mtd->size)
			return 0;

		/* Fall back to skipping bad blocks if the range is unmapped */
		if (!get_mtd_blk_map_address(mtd, &offset)) {
			if (mtd_block_isbad(mtd, offset & ~(erasesize - 1))) {
				printf("Skip bad block 0x%08llx\n",
				       offset & ~(erasesize - 1));
				offset += erasesize - block_offset;
				continue;
			}
		}

		/* Read at most up to the end of the current erase block */
		if (left_to_read < (erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = erasesize - block_offset;

		rval = mtd_read(mtd, offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}

char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: failed to allocate mtdparts buffer!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/*
			 * NAND flash is erased by whole blocks, but the GPT
			 * reserves only 33 sectors after the last partition
			 * for the backup table. Block-erasing that partition
			 * from a user program would therefore wipe the backup
			 * GPT, so truncate the last partition to an erase-block
			 * boundary (dropping a whole block when it is already
			 * aligned).
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
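/*
 * Illustrative output of mtd_part_parse() (hypothetical device and
 * partition names, assuming a 128 KiB erase block):
 *
 *   mtdparts=spi-nand0:0x400000@0x200000(uboot),0x3de0000@0x600000(rootfs)
 *
 * Each entry is "<size>@<offset>(<name>)" in bytes; the final entry has
 * been truncated to an erase-block boundary as described above.
 */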
233 */ 234 snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)", 235 (int)(size_t)(info.size - 236 (info.size - 1) % 237 (mtd->erasesize >> 9) - 1) << 9, 238 (int)(size_t)info.start << 9, 239 info.name); 240 break; 241 } 242 length = strlen(mtd_part_info_temp); 243 data_len -= length; 244 mtd_part_info_p = mtd_part_info_p + length + 1; 245 memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE); 246 } 247 248 return mtd_part_info; 249 } 250 251 ulong mtd_dread(struct udevice *udev, lbaint_t start, 252 lbaint_t blkcnt, void *dst) 253 { 254 struct blk_desc *desc = dev_get_uclass_platdata(udev); 255 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD) 256 loff_t off = (loff_t)(start * 512); 257 size_t rwsize = blkcnt * 512; 258 #endif 259 struct mtd_info *mtd; 260 int ret = 0; 261 262 if (!desc) 263 return ret; 264 265 mtd = desc->bdev->priv; 266 if (!mtd) 267 return 0; 268 269 if (blkcnt == 0) 270 return 0; 271 272 if (desc->devnum == BLK_MTD_NAND) { 273 #if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD) 274 mtd = dev_get_priv(udev->parent); 275 if (!mtd) 276 return 0; 277 278 ret = nand_read_skip_bad(mtd, off, &rwsize, 279 NULL, mtd->size, 280 (u_char *)(dst)); 281 #else 282 ret = mtd_map_read(mtd, off, &rwsize, 283 NULL, mtd->size, 284 (u_char *)(dst)); 285 #endif 286 if (!ret) 287 return blkcnt; 288 else 289 return 0; 290 } else if (desc->devnum == BLK_MTD_SPI_NAND) { 291 ret = mtd_map_read(mtd, off, &rwsize, 292 NULL, mtd->size, 293 (u_char *)(dst)); 294 if (!ret) 295 return blkcnt; 296 else 297 return 0; 298 } else if (desc->devnum == BLK_MTD_SPI_NOR) { 299 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD) 300 size_t retlen_nor; 301 302 mtd_read(mtd, off, rwsize, &retlen_nor, dst); 303 if (retlen_nor == rwsize) 304 return blkcnt; 305 else 306 #endif 307 return 0; 308 } else { 309 return 0; 310 } 311 } 312 313 ulong mtd_dwrite(struct udevice *udev, lbaint_t start, 314 lbaint_t blkcnt, const void *src) 315 { 316 /* Not implemented */ 317 return 0; 318 } 319 320 ulong mtd_derase(struct udevice *udev, lbaint_t start, 321 lbaint_t blkcnt) 322 { 323 /* Not implemented */ 324 return 0; 325 } 326 327 static int mtd_blk_probe(struct udevice *udev) 328 { 329 struct mtd_info *mtd = dev_get_uclass_priv(udev->parent); 330 struct blk_desc *desc = dev_get_uclass_platdata(udev); 331 int ret, i; 332 333 desc->bdev->priv = mtd; 334 sprintf(desc->vendor, "0x%.4x", 0x2207); 335 memcpy(desc->product, mtd->name, strlen(mtd->name)); 336 memcpy(desc->revision, "V1.00", sizeof("V1.00")); 337 if (mtd->type == MTD_NANDFLASH) { 338 if (desc->devnum == BLK_MTD_NAND) 339 mtd = dev_get_priv(udev->parent); 340 /* 341 * Find the first useful block in the end, 342 * and it is the end lba of the nand storage. 343 */ 344 for (i = 0; i < (mtd->size / mtd->erasesize); i++) { 345 ret = mtd_block_isbad(mtd, 346 mtd->size - mtd->erasesize * (i + 1)); 347 if (!ret) { 348 desc->lba = (mtd->size >> 9) - 349 (mtd->erasesize >> 9) * i; 350 break; 351 } 352 } 353 } else { 354 desc->lba = mtd->size >> 9; 355 } 356 357 debug("MTD: desc->lba is %lx\n", desc->lba); 358 359 return 0; 360 } 361 362 static const struct blk_ops mtd_blk_ops = { 363 .read = mtd_dread, 364 #ifndef CONFIG_SPL_BUILD 365 .write = mtd_dwrite, 366 .erase = mtd_derase, 367 #endif 368 }; 369 370 U_BOOT_DRIVER(mtd_blk) = { 371 .name = "mtd_blk", 372 .id = UCLASS_BLK, 373 .ops = &mtd_blk_ops, 374 .probe = mtd_blk_probe, 375 }; 376