1 /* 2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <arch_helpers.h> 8 #include <debug.h> 9 #include <io/io_block.h> 10 #include <mmio.h> 11 #include <platform_def.h> 12 #include <sys/types.h> 13 #include <utils_def.h> 14 15 #include "uniphier.h" 16 17 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) 18 19 #define NAND_CMD_READ0 0 20 #define NAND_CMD_READSTART 0x30 21 22 #define DENALI_ECC_ENABLE 0x0e0 23 #define DENALI_PAGES_PER_BLOCK 0x150 24 #define DENALI_DEVICE_MAIN_AREA_SIZE 0x170 25 #define DENALI_DEVICE_SPARE_AREA_SIZE 0x180 26 #define DENALI_TWO_ROW_ADDR_CYCLES 0x190 27 #define DENALI_INTR_STATUS0 0x410 28 #define DENALI_INTR_ECC_UNCOR_ERR BIT(1) 29 #define DENALI_INTR_DMA_CMD_COMP BIT(2) 30 #define DENALI_INTR_INT_ACT BIT(12) 31 32 #define DENALI_DMA_ENABLE 0x700 33 34 #define DENALI_HOST_ADDR 0x00 35 #define DENALI_HOST_DATA 0x10 36 37 #define DENALI_MAP01 (1 << 26) 38 #define DENALI_MAP10 (2 << 26) 39 #define DENALI_MAP11 (3 << 26) 40 41 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) 42 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) 43 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) 44 45 #define DENALI_ACCESS_DEFAULT_AREA 0x42 46 47 #define UNIPHIER_NAND_BBT_UNKNOWN 0xff 48 49 struct uniphier_nand { 50 uintptr_t host_base; 51 uintptr_t reg_base; 52 int pages_per_block; 53 int page_size; 54 int two_row_addr_cycles; 55 uint8_t bbt[16]; 56 }; 57 58 struct uniphier_nand uniphier_nand; 59 60 static void uniphier_nand_host_write(struct uniphier_nand *nand, 61 uint32_t addr, uint32_t data) 62 { 63 mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr); 64 mmio_write_32(nand->host_base + DENALI_HOST_DATA, data); 65 } 66 67 static uint32_t uniphier_nand_host_read(struct uniphier_nand *nand, 68 uint32_t addr) 69 { 70 mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr); 71 return mmio_read_32(nand->host_base + DENALI_HOST_DATA); 72 } 73 74 static int uniphier_nand_block_isbad(struct uniphier_nand *nand, int block) 75 { 76 int page = nand->pages_per_block * block; 77 int column = nand->page_size; 78 uint8_t bbm; 79 uint32_t status; 80 int is_bad; 81 82 /* use cache if available */ 83 if (block < ARRAY_SIZE(nand->bbt) && 84 nand->bbt[block] != UNIPHIER_NAND_BBT_UNKNOWN) 85 return nand->bbt[block]; 86 87 mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 0); 88 89 mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1); 90 91 uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READ0); 92 uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, column & 0xff); 93 uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (column >> 8) & 0xff); 94 uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, page & 0xff); 95 uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (page >> 8) & 0xff); 96 if (!nand->two_row_addr_cycles) 97 uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, 98 (page >> 16) & 0xff); 99 uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READSTART); 100 101 do { 102 status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0); 103 } while (!(status & DENALI_INTR_INT_ACT)); 104 105 bbm = uniphier_nand_host_read(nand, DENALI_MAP11_DATA); 106 107 is_bad = bbm != 0xff; 108 109 /* save the result for future re-use */ 110 nand->bbt[block] = is_bad; 111 112 if (is_bad) 113 WARN("found bad block at %d. skip.\n", block); 114 115 return is_bad; 116 } 117 118 static int uniphier_nand_read_pages(struct uniphier_nand *nand, uintptr_t buf, 119 int page_start, int page_count) 120 { 121 uint32_t status; 122 123 mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 1); 124 mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 1); 125 126 mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1); 127 128 /* use Data DMA (64bit) */ 129 mmio_write_32(nand->host_base + DENALI_HOST_ADDR, 130 DENALI_MAP10 | page_start); 131 132 /* 133 * 1. setup transfer type, interrupt when complete, 134 * burst len = 64 bytes, the number of pages 135 */ 136 mmio_write_32(nand->host_base + DENALI_HOST_DATA, 137 0x01002000 | (64 << 16) | page_count); 138 139 /* 2. set memory low address */ 140 mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf); 141 142 /* 3. set memory high address */ 143 mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf >> 32); 144 145 do { 146 status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0); 147 } while (!(status & DENALI_INTR_DMA_CMD_COMP)); 148 149 mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 0); 150 151 if (status & DENALI_INTR_ECC_UNCOR_ERR) { 152 ERROR("uncorrectable error in page range %d-%d", 153 page_start, page_start + page_count - 1); 154 return -EBADMSG; 155 } 156 157 return 0; 158 } 159 160 static size_t __uniphier_nand_read(struct uniphier_nand *nand, int lba, 161 uintptr_t buf, size_t size) 162 { 163 int pages_per_block = nand->pages_per_block; 164 int page_size = nand->page_size; 165 int blocks_to_skip = lba / pages_per_block; 166 int pages_to_read = DIV_ROUND_UP(size, page_size); 167 int page = lba % pages_per_block; 168 int block = 0; 169 uintptr_t p = buf; 170 int page_count, ret; 171 172 while (blocks_to_skip) { 173 ret = uniphier_nand_block_isbad(nand, block); 174 if (ret < 0) 175 goto out; 176 177 if (!ret) 178 blocks_to_skip--; 179 180 block++; 181 } 182 183 while (pages_to_read) { 184 ret = uniphier_nand_block_isbad(nand, block); 185 if (ret < 0) 186 goto out; 187 188 if (ret) { 189 block++; 190 continue; 191 } 192 193 page_count = MIN(pages_per_block - page, pages_to_read); 194 195 ret = uniphier_nand_read_pages(nand, p, 196 block * pages_per_block + page, 197 page_count); 198 if (ret) 199 goto out; 200 201 block++; 202 page = 0; 203 p += page_size * page_count; 204 pages_to_read -= page_count; 205 } 206 207 out: 208 /* number of read bytes */ 209 return MIN(size, p - buf); 210 } 211 212 static size_t uniphier_nand_read(int lba, uintptr_t buf, size_t size) 213 { 214 size_t count; 215 216 inv_dcache_range(buf, size); 217 218 count = __uniphier_nand_read(&uniphier_nand, lba, buf, size); 219 220 inv_dcache_range(buf, size); 221 222 return count; 223 } 224 225 static struct io_block_dev_spec uniphier_nand_dev_spec = { 226 .buffer = { 227 .offset = UNIPHIER_BLOCK_BUF_BASE, 228 .length = UNIPHIER_BLOCK_BUF_SIZE, 229 }, 230 .ops = { 231 .read = uniphier_nand_read, 232 }, 233 /* fill .block_size at run-time */ 234 }; 235 236 static int uniphier_nand_hw_init(struct uniphier_nand *nand) 237 { 238 int i; 239 240 for (i = 0; i < ARRAY_SIZE(nand->bbt); i++) 241 nand->bbt[i] = UNIPHIER_NAND_BBT_UNKNOWN; 242 243 nand->host_base = 0x68000000; 244 nand->reg_base = 0x68100000; 245 246 nand->pages_per_block = 247 mmio_read_32(nand->reg_base + DENALI_PAGES_PER_BLOCK); 248 249 nand->page_size = 250 mmio_read_32(nand->reg_base + DENALI_DEVICE_MAIN_AREA_SIZE); 251 252 if (mmio_read_32(nand->reg_base + DENALI_TWO_ROW_ADDR_CYCLES) & BIT(0)) 253 nand->two_row_addr_cycles = 1; 254 255 uniphier_nand_host_write(nand, DENALI_MAP10, 256 DENALI_ACCESS_DEFAULT_AREA); 257 258 return 0; 259 } 260 261 int uniphier_nand_init(uintptr_t *block_dev_spec) 262 { 263 int ret; 264 265 ret = uniphier_nand_hw_init(&uniphier_nand); 266 if (ret) 267 return ret; 268 269 uniphier_nand_dev_spec.block_size = uniphier_nand.page_size; 270 271 *block_dev_spec = (uintptr_t)&uniphier_nand_dev_spec; 272 273 return 0; 274 } 275