/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/io/io_block.h>
#include <lib/mmio.h>
#include <lib/utils_def.h>

#include "uniphier.h"

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

#define NAND_CMD_READ0			0
#define NAND_CMD_READSTART		0x30

#define DENALI_ECC_ENABLE		0x0e0
#define DENALI_PAGES_PER_BLOCK		0x150
#define DENALI_DEVICE_MAIN_AREA_SIZE	0x170
#define DENALI_DEVICE_SPARE_AREA_SIZE	0x180
#define DENALI_TWO_ROW_ADDR_CYCLES	0x190
#define DENALI_INTR_STATUS0		0x410
#define DENALI_INTR_ECC_UNCOR_ERR	BIT(1)
#define DENALI_INTR_DMA_CMD_COMP	BIT(2)
#define DENALI_INTR_INT_ACT		BIT(12)

#define DENALI_DMA_ENABLE		0x700

#define DENALI_HOST_ADDR		0x00
#define DENALI_HOST_DATA		0x10

#define DENALI_MAP01			(1 << 26)
#define DENALI_MAP10			(2 << 26)
#define DENALI_MAP11			(3 << 26)

#define DENALI_MAP11_CMD		((DENALI_MAP11) | 0)
#define DENALI_MAP11_ADDR		((DENALI_MAP11) | 1)
#define DENALI_MAP11_DATA		((DENALI_MAP11) | 2)

#define DENALI_ACCESS_DEFAULT_AREA	0x42

#define UNIPHIER_NAND_BBT_UNKNOWN	0xff

struct uniphier_nand {
	uintptr_t host_base;
	uintptr_t reg_base;
	int pages_per_block;
	int page_size;
	int two_row_addr_cycles;
	uint8_t bbt[16];
};

struct uniphier_nand uniphier_nand;

/* write an (address, data) pair to the indirect host interface */
static void uniphier_nand_host_write(struct uniphier_nand *nand,
				     uint32_t addr, uint32_t data)
{
	mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr);
	mmio_write_32(nand->host_base + DENALI_HOST_DATA, data);
}

/* read back data from the indirect host interface */
static uint32_t uniphier_nand_host_read(struct uniphier_nand *nand,
					uint32_t addr)
{
	mmio_write_32(nand->host_base + DENALI_HOST_ADDR, addr);
	return mmio_read_32(nand->host_base + DENALI_HOST_DATA);
}

/*
 * Check the bad block marker: read the first spare (OOB) byte of the first
 * page in the block.  Any value other than 0xff marks the block as bad.
 */
static int uniphier_nand_block_isbad(struct uniphier_nand *nand, int block)
{
	int page = nand->pages_per_block * block;
	int column = nand->page_size;
	uint8_t bbm;
	uint32_t status;
	int is_bad;

	/* use cache if available */
	if (block < ARRAY_SIZE(nand->bbt) &&
	    nand->bbt[block] != UNIPHIER_NAND_BBT_UNKNOWN)
		return nand->bbt[block];

	mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 0);

	mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1);

	uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READ0);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, column & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (column >> 8) & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, page & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_ADDR, (page >> 8) & 0xff);
	if (!nand->two_row_addr_cycles)
		uniphier_nand_host_write(nand, DENALI_MAP11_ADDR,
					 (page >> 16) & 0xff);
	uniphier_nand_host_write(nand, DENALI_MAP11_CMD, NAND_CMD_READSTART);

	do {
		status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0);
	} while (!(status & DENALI_INTR_INT_ACT));

	bbm = uniphier_nand_host_read(nand, DENALI_MAP11_DATA);

	is_bad = bbm != 0xff;

	/* if possible, save the result for future re-use */
	if (block < ARRAY_SIZE(nand->bbt))
		nand->bbt[block] = is_bad;

	if (is_bad)
		WARN("found bad block at %d. skip.\n", block);
skip.\n", block); 117 118 return is_bad; 119 } 120 121 static int uniphier_nand_read_pages(struct uniphier_nand *nand, uintptr_t buf, 122 int page_start, int page_count) 123 { 124 uint32_t status; 125 126 mmio_write_32(nand->reg_base + DENALI_ECC_ENABLE, 1); 127 mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 1); 128 129 mmio_write_32(nand->reg_base + DENALI_INTR_STATUS0, -1); 130 131 /* use Data DMA (64bit) */ 132 mmio_write_32(nand->host_base + DENALI_HOST_ADDR, 133 DENALI_MAP10 | page_start); 134 135 /* 136 * 1. setup transfer type, interrupt when complete, 137 * burst len = 64 bytes, the number of pages 138 */ 139 mmio_write_32(nand->host_base + DENALI_HOST_DATA, 140 0x01002000 | (64 << 16) | page_count); 141 142 /* 2. set memory low address */ 143 mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf); 144 145 /* 3. set memory high address */ 146 mmio_write_32(nand->host_base + DENALI_HOST_DATA, buf >> 32); 147 148 do { 149 status = mmio_read_32(nand->reg_base + DENALI_INTR_STATUS0); 150 } while (!(status & DENALI_INTR_DMA_CMD_COMP)); 151 152 mmio_write_32(nand->reg_base + DENALI_DMA_ENABLE, 0); 153 154 if (status & DENALI_INTR_ECC_UNCOR_ERR) { 155 ERROR("uncorrectable error in page range %d-%d", 156 page_start, page_start + page_count - 1); 157 return -EBADMSG; 158 } 159 160 return 0; 161 } 162 163 static size_t __uniphier_nand_read(struct uniphier_nand *nand, int lba, 164 uintptr_t buf, size_t size) 165 { 166 int pages_per_block = nand->pages_per_block; 167 int page_size = nand->page_size; 168 int blocks_to_skip = lba / pages_per_block; 169 int pages_to_read = DIV_ROUND_UP(size, page_size); 170 int page = lba % pages_per_block; 171 int block = 0; 172 uintptr_t p = buf; 173 int page_count, ret; 174 175 while (blocks_to_skip) { 176 ret = uniphier_nand_block_isbad(nand, block); 177 if (ret < 0) 178 goto out; 179 180 if (!ret) 181 blocks_to_skip--; 182 183 block++; 184 } 185 186 while (pages_to_read) { 187 ret = uniphier_nand_block_isbad(nand, block); 188 if (ret < 0) 189 goto out; 190 191 if (ret) { 192 block++; 193 continue; 194 } 195 196 page_count = MIN(pages_per_block - page, pages_to_read); 197 198 ret = uniphier_nand_read_pages(nand, p, 199 block * pages_per_block + page, 200 page_count); 201 if (ret) 202 goto out; 203 204 block++; 205 page = 0; 206 p += page_size * page_count; 207 pages_to_read -= page_count; 208 } 209 210 out: 211 /* number of read bytes */ 212 return MIN(size, p - buf); 213 } 214 215 static size_t uniphier_nand_read(int lba, uintptr_t buf, size_t size) 216 { 217 size_t count; 218 219 inv_dcache_range(buf, size); 220 221 count = __uniphier_nand_read(&uniphier_nand, lba, buf, size); 222 223 inv_dcache_range(buf, size); 224 225 return count; 226 } 227 228 static struct io_block_dev_spec uniphier_nand_dev_spec = { 229 .buffer = { 230 .offset = UNIPHIER_BLOCK_BUF_BASE, 231 .length = UNIPHIER_BLOCK_BUF_SIZE, 232 }, 233 .ops = { 234 .read = uniphier_nand_read, 235 }, 236 /* fill .block_size at run-time */ 237 }; 238 239 static int uniphier_nand_hw_init(struct uniphier_nand *nand) 240 { 241 int i; 242 243 for (i = 0; i < ARRAY_SIZE(nand->bbt); i++) 244 nand->bbt[i] = UNIPHIER_NAND_BBT_UNKNOWN; 245 246 nand->host_base = 0x68000000; 247 nand->reg_base = 0x68100000; 248 249 nand->pages_per_block = 250 mmio_read_32(nand->reg_base + DENALI_PAGES_PER_BLOCK); 251 252 nand->page_size = 253 mmio_read_32(nand->reg_base + DENALI_DEVICE_MAIN_AREA_SIZE); 254 255 if (mmio_read_32(nand->reg_base + DENALI_TWO_ROW_ADDR_CYCLES) & BIT(0)) 256 nand->two_row_addr_cycles = 1; 257 258 
	uniphier_nand_host_write(nand, DENALI_MAP10,
				 DENALI_ACCESS_DEFAULT_AREA);

	return 0;
}

int uniphier_nand_init(uintptr_t *block_dev_spec)
{
	int ret;

	ret = uniphier_nand_hw_init(&uniphier_nand);
	if (ret)
		return ret;

	uniphier_nand_dev_spec.block_size = uniphier_nand.page_size;

	*block_dev_spec = (uintptr_t)&uniphier_nand_dev_spec;

	return 0;
}