/*
 * Copyright 2007-2011 Freescale Semiconductor, Inc.
 *
 * (C) Copyright 2003 Motorola Inc.
 * Modified by Xianghua Xiao, X.Xiao@motorola.com
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <watchdog.h>
#include <asm/processor.h>
#include <ioports.h>
#include <sata.h>
#include <asm/io.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/fsl_law.h>
#include <asm/fsl_serdes.h>
#include "mp.h"
#ifdef CONFIG_SYS_QE_FW_IN_NAND
#include <nand.h>
#include <errno.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

extern void srio_init(void);

#ifdef CONFIG_QE
extern qe_iop_conf_t qe_iop_conf_tab[];
extern void qe_config_iopin(u8 port, u8 pin, int dir,
				int open_drain, int assign);
extern void qe_init(uint qe_base);
extern void qe_reset(void);

/*
 * Program the QE (QUICC Engine) parallel-I/O pin mux from the
 * board-provided qe_iop_conf_tab[], one entry at a time, stopping
 * at the QE_IOP_TAB_END sentinel in the .assign field.
 */
static void config_qe_ioports(void)
{
	u8 port, pin;
	int dir, open_drain, assign;
	int i;

	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
		port = qe_iop_conf_tab[i].port;
		pin = qe_iop_conf_tab[i].pin;
		dir = qe_iop_conf_tab[i].dir;
		open_drain = qe_iop_conf_tab[i].open_drain;
		assign = qe_iop_conf_tab[i].assign;
		qe_config_iopin(port, pin, dir, open_drain, assign);
	}
}
#endif

#ifdef CONFIG_CPM2
/*
 * Configure the four CPM parallel-I/O ports from the board's
 * iop_conf_tab[][]: first accumulate a bitmask per register
 * (PPAR/PSOR/PDIR/PODR/PDAT) over the 32 pins of each port, then
 * apply all of them to the ioport registers in one pass.
 */
void config_8560_ioports (volatile ccsr_cpm_t * cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		/* accumulated per-register bitmasks for this port */
		uint pmsk = 0,
		     ppar = 0,
		     psor = 0,
		     pdir = 0,
		     podr = 0,
		     pdat = 0;
		iop_conf_t *iopc = (iop_conf_t *) & iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		/* only touch the hardware if at least one pin is configured */
		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr (cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * the (somewhat confused) paragraph at the
			 * bottom of page 35-5 warns that there might
			 * be "unknown behaviour" when programming
			 * PSORx and PDIRx, if PPARx = 1, so I
			 * decided this meant I had to disable the
			 * dedicated function first, and enable it
			 * last.
			 */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			/* PPAR must be written last (see note above) */
			iop->ppar |= ppar;
		}
	}
}
#endif

#ifdef CONFIG_SYS_FSL_CPC
/*
 * Enable each CoreNet Platform Cache (CPC), applying the ECC-scrub
 * errata workarounds where configured, and print the total size.
 */
static void enable_cpc(void)
{
	int i;
	u32 size = 0;

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		u32 cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		/* erratum CPC-A002: disable tag ECC scrubbing */
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		/* erratum CPC-A003: disable data ECC scrubbing */
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif

		/* enable the cache (CE) with parity/ECC checking (PE) */
		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync write */
		in_be32(&cpc->cpccsr0);

	}

	printf("Corenet Platform Cache: %d KB enabled\n", size);
}

/*
 * Flash-invalidate every CPC and clear all line locks, spinning until
 * the hardware clears the command bits to signal completion.
 */
void invalidate_cpc(void)
{
	int i;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		/* Flash invalidate the CPC and clear all the locks */
		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
			;
	}
}
#else
/* No CPC on this part: compile the calls away */
#define enable_cpc()
#define invalidate_cpc()
#endif /* CONFIG_SYS_FSL_CPC */

/*
 * Breathe some life into the CPU...
 *
 * Set up the memory map
 * initialize a bunch of registers
 */

#ifdef CONFIG_FSL_CORENET
/*
 * Enable the timebase for the calling core: CoreNet parts gate each
 * core's timebase through the RCPM CTBENRL register, indexed by the
 * core number read from the PIC WHOAMI register.
 */
static void corenet_tb_init(void)
{
	volatile ccsr_rcpm_t *rcpm =
		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
	volatile ccsr_pic_t *pic =
		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
	u32 whoami = in_be32(&pic->whoami);

	/* Enable the timebase register for this core */
	out_be32(&rcpm->ctbenrl, (1 << whoami));
}
#endif

/*
 * Early (pre-relocation) CPU init: errata workarounds, I/O port and
 * memory-controller setup, TLB configuration, and CPC invalidation
 * before DDR is brought up.
 */
void cpu_init_f (void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: A core hang possible while executing
	 * a msync instruction and a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	/* free up the TLB entries used by the boot-time mappings */
	disable_tlb(14);
	disable_tlb(15);

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

	init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif
#ifdef CONFIG_QE
	/* Config QE ioports */
	config_qe_ioports();
#endif
#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();
}

/* Implement a dummy function for those platforms w/o SERDES */
static void __fsl_serdes__init(void)
{
	return ;
}
/* boards with SERDES override this weak alias with their own init */
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);

/*
 * Initialize L2 as cache.
 *
 * The newer 8548, etc, parts have twice as much cache, but
 * use the same bit-encoding as the older 8555, etc, parts.
 *
 */
/*
 * Post-relocation CPU init: bring up the L2 (either the classic
 * front-side L2 or the e500mc backside L2, depending on config),
 * enable the CPC, and initialize SERDES/SRIO/MP/LBC as configured.
 * Returns 0 on success, -1 if the L2 size field is unrecognized.
 */
int cpu_init_r(void)
{
#ifdef CONFIG_SYS_LBC_LCRR
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
#endif

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22)
	/* erratum CPU22 workaround: force L1 D-cache to write-shadow mode */
	flush_dcache();
	mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
	sync();
#endif

	puts ("L2: ");

#if defined(CONFIG_L2_CACHE)
	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
	volatile uint cache_ctl;
	uint svr, ver;
	uint l2srbar;
	u32 l2siz_field;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	/*
	 * When booting from RAM with the L2 configured as SRAM, tear the
	 * SRAM mapping down so the L2 can be (re-)enabled as cache below.
	 */
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	/* L2CTL[L2SIZ] encodes the cache size; decoding depends on SoC */
	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;	/* NOTE(review): unreachable after return */
	case 0x1:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("128 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 Kbyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts ("512 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		puts("already enabled");
		l2srbar = l2cache->l2srbar0;
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		/*
		 * If the L2 SRAM window still points into flash, move it
		 * to the configured init address so it no longer shadows
		 * the flash region.
		 */
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		/* wait for the enable bit to latch before reporting */
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
	}
#else
	puts("disabled\n");
#endif

	enable_cpc();

	/* needs to be in ram since code uses global static vars */
	fsl_serdes_init();

#ifdef CONFIG_SYS_SRIO
	srio_init();
#endif

#if defined(CONFIG_MP)
	setup_mp();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC136
	/* erratum ESDHC136 workaround: set a bit in a DCSR register */
	{
		void *p;
		p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
		setbits_be32(p, 1 << (31 - 14));
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of LCRR register to improve the writing
	 * speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);	/* read back to sync the write */
	isync();
#endif

	return 0;
}

extern void setup_ivors(void);

/*
 * Called just before booting the OS: mask asynchronous interrupts
 * (machine-check, critical, debug) and install the final IVOR table.
 */
void arch_preboot_os(void)
{
	u32 msr;

	/*
	 * We are changing interrupt offsets and are about to boot the OS so
	 * we need to make sure we disable all async interrupts. EE is already
	 * disabled by the time we get called.
	 */
	msr = mfmsr();
	msr &= ~(MSR_ME|MSR_CE|MSR_DE);
	mtmsr(msr);

	setup_ivors();
}

#if defined(CONFIG_CMD_SATA) && defined(CONFIG_FSL_SATA)
/*
 * SATA init hook: only run the generic init when at least one SATA
 * lane is actually muxed in by the SERDES configuration.
 * Returns 1 (no controller) when neither SATA lane is configured.
 */
int sata_initialize(void)
{
	if (is_serdes_configured(SATA1) || is_serdes_configured(SATA2))
		return __sata_initialize();

	return 1;
}
#endif

/*
 * Secondary (post-relocation) init: load the QE firmware from NAND
 * into DDR if so configured, then initialize and reset the QE.
 */
void cpu_secondary_init_r(void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#ifdef CONFIG_SYS_QE_FW_IN_NAND
	int ret;
	size_t fw_length = CONFIG_SYS_QE_FW_LENGTH;

	/* load QE firmware from NAND flash to DDR first */
	ret = nand_read(&nand_info[0], (loff_t)CONFIG_SYS_QE_FW_IN_NAND,
			&fw_length, (u_char *)CONFIG_SYS_QE_FW_ADDR);

	/*
	 * NOTE(review): `ret && ret == -EUCLEAN` is redundant (the second
	 * test implies the first) and only reports correctable-bitflip
	 * reads; other nand_read() errors are silently ignored here —
	 * confirm whether that is intentional.
	 */
	if (ret && ret == -EUCLEAN) {
		printf ("NAND read for QE firmware at offset %x failed %d\n",
			CONFIG_SYS_QE_FW_IN_NAND, ret);
	}
#endif
	qe_init(qe_base);
	qe_reset();
#endif
}