1/* 2 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc. 3 * Copyright (C) 2003 Motorola,Inc. 4 * 5 * See file CREDITS for list of people who contributed to this 6 * project. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License as 10 * published by the Free Software Foundation; either version 2 of 11 * the License, or (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 21 * MA 02111-1307 USA 22 */ 23 24/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards 25 * 26 * The processor starts at 0xfffffffc and the code is first executed in the 27 * last 4K page(0xfffff000-0xffffffff) in flash/rom. 
 *
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file */

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#undef	MSR_KERNEL
#define MSR_KERNEL ( MSR_ME )	/* Machine Check */

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

#ifndef CONFIG_NAND_SPL
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
#endif

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end__)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces'(e.g. the boot rom) to
 * continue the boot procedure.
 *
 * Once the boot rom is mapped by TLB entries we can proceed
 * with normal startup.
 */

	.section .bootpg,"ax"
	.globl _start_e500

_start_e500:
/* Enable debug exception */
	li	r1,MSR_DE
	mtmsr	r1

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	/*
	 * Erratum A004510 only affects specific silicon revisions.
	 * Compare the low byte of SVR against the affected rev(s);
	 * r27 records the result (1 = erratum present) for the rest
	 * of the boot path.
	 */
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff		/* isolate the SVR revision byte */
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	li	r27,0
	b	2f

1:	li	r27,1	/* Remember for later that we have the erratum */
	/* Erratum says set bits 55:60 to 001001 */
	msync
	isync
	mfspr	r3,976		/* SPR 976: implementation-specific (per erratum doc) */
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8	/* insert 001001 into bits 55:60 */
	mtspr	976,r3
	isync
2:
#endif

#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
	/* ISBC uses L2 as stack.
	 * Disable L2 cache here so that u-boot can enable it later
	 * as part of its normal flow
	 */

	/* Check if L2 is enabled */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	and.	r4, r3, r2
	beq	l2_disabled

	mfspr	r3, SPRN_L2CSR0
	/* Flush L2 cache */
	lis	r2,(L2CSR0_L2FL)@h
	ori	r2, r2, (L2CSR0_L2FL)@l
	or	r3, r2, r3
	sync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync
1:
	/* Poll until hardware clears the L2FL (flush) bit */
	mfspr	r3, SPRN_L2CSR0
	and.	r1, r3, r2
	bne	1b

	/* Flush done: clear L2E to disable the L2 cache */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	andc	r4, r3, r2
	sync
	isync
	mtspr	SPRN_L2CSR0,r4
	isync

l2_disabled:
#endif

/* clear registers/arrays not reset by hardware */

	/* L1 */
	li	r0,2
	mtspr	L1CSR0,r0	/* invalidate d-cache */
	mtspr	L1CSR1,r0	/* invalidate i-cache */

	mfspr	r1,DBSR
	mtspr	DBSR,r1		/* Clear all valid bits (write-1-to-clear) */

	/*
	 * Enable L1 Caches early
	 */

#if defined(CONFIG_E500MC) && defined(CONFIG_SYS_CACHE_STASHING)
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	li	r2,(32 + 0)
	mtspr	L1CSR2,r2
#endif

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:
	/* Wait for the invalidate/lock-flash bits to self-clear */
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:
	/* Spin until the I-cache reports enabled (ICE set) */
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:
	/* Wait for the invalidate/lock-flash bits to self-clear */
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:
	/* Spin until the D-cache reports enabled (DCE set) */
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b

#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(CONFIG_NAND_SPL)
/*
 * TLB entry for debugging in AS1
 * Create temporary TLB entry in AS0 to handle debug exception
 * As on debug exception MSR is cleared i.e. Address space is changed
 * to 0. A TLB entry (in AS0) is required to handle debug exception generated
 * in AS1.
 */

	lis	r6,FSL_BOOKE_MAS0(1,
			CONFIG_SYS_PPC_E500_DEBUG_TLB, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1,
			CONFIG_SYS_PPC_E500_DEBUG_TLB, 0)@l

#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
/*
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because flash's virtual address maps to 0xff800000 - 0xffffffff
 * and this window is outside of the 4K boot window.
 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_4M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_4M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000,
			(MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000,
			(MAS2_I|MAS2_G))@l

	/* The 85xx has the default boot window 0xff800000 - 0xffffffff */
	lis	r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	/* Secure boot: 1M entry over the PBI flash window set up by ISBC */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE,(MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE,(MAS2_I|MAS2_G))@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
#else
/*
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because "nexti" will resize TLB to 4K
 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_256K)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_256K)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE,
			(MAS2_I))@l
	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
#endif
	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	tlbwe
	isync
#endif

/*
 * No need to setup interrupt vector for NAND SPL
 * because NAND SPL never compiles it.
 */
#if !defined(CONFIG_NAND_SPL)
	/* Setup interrupt vectors (IVPR = vector base, IVORn = offsets) */
	lis	r1,CONFIG_SYS_MONITOR_BASE@h
	mtspr	IVPR,r1

	lis	r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
	ori	r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l

	addi	r4,r3,CriticalInput - _start + _START_OFFSET
	mtspr	IVOR0,r4	/* 0: Critical input */
	addi	r4,r3,MachineCheck - _start + _START_OFFSET
	mtspr	IVOR1,r4	/* 1: Machine check */
	addi	r4,r3,DataStorage - _start + _START_OFFSET
	mtspr	IVOR2,r4	/* 2: Data storage */
	addi	r4,r3,InstStorage - _start + _START_OFFSET
	mtspr	IVOR3,r4	/* 3: Instruction storage */
	addi	r4,r3,ExtInterrupt - _start + _START_OFFSET
	mtspr	IVOR4,r4	/* 4: External interrupt */
	addi	r4,r3,Alignment - _start + _START_OFFSET
	mtspr	IVOR5,r4	/* 5: Alignment */
	addi	r4,r3,ProgramCheck - _start + _START_OFFSET
	mtspr	IVOR6,r4	/* 6: Program check */
	addi	r4,r3,FPUnavailable - _start + _START_OFFSET
	mtspr	IVOR7,r4	/* 7: floating point unavailable */
	addi	r4,r3,SystemCall - _start + _START_OFFSET
	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	addi	r4,r3,Decrementer - _start + _START_OFFSET
	mtspr	IVOR10,r4	/* 10: Decrementer */
	addi	r4,r3,IntervalTimer - _start + _START_OFFSET
	mtspr	IVOR11,r4	/* 11: Interval timer */
	addi	r4,r3,WatchdogTimer - _start + _START_OFFSET
	mtspr	IVOR12,r4	/* 12: Watchdog timer */
	addi	r4,r3,DataTLBError - _start + _START_OFFSET
	mtspr	IVOR13,r4	/* 13: Data TLB error */
	addi	r4,r3,InstructionTLBError - _start + _START_OFFSET
	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
	addi	r4,r3,DebugBreakpoint - _start + _START_OFFSET
	mtspr	IVOR15,r4	/* 15: Debug */
#endif

	/* Clear and set up some registers. */
	li	r0,0x0000
	lis	r1,0xffff
	mtspr	DEC,r0			/* prevent dec exceptions */
	mttbl	r0			/* prevent fit & wdt exceptions */
	mttbu	r0
	mtspr	TSR,r1			/* clear all timer exception status */
	mtspr	TCR,r0			/* disable all */
	mtspr	ESR,r0			/* clear exception syndrome register */
	mtspr	MCSR,r0			/* machine check syndrome register */
	mtxer	r0			/* clear integer exception register */

#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r0			/* make sure MAS8 is clear */
#endif

	/* Enable Time Base and Select Time Base Clock */
	lis	r0,HID0_EMCP@h		/* Enable machine check */
#if defined(CONFIG_ENABLE_36BIT_PHYS)
	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
#endif
#ifndef CONFIG_E500MC
	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
#endif
	mtspr	HID0,r0

#ifndef CONFIG_E500MC
	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r3,PVR
	andi.	r3,r3, 0xff
	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt 1f
	/* Set MBDD bit also */
	ori	r0, r0, HID1_MBDD@l
1:
	mtspr	HID1,r0
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	/* Erratum A003999 workaround: set a bit in SPR 977 (per erratum doc) */
	mfspr	r3,977
	oris	r3,r3,0x0100
	mtspr	977,r3
#endif

	/* Enable Branch Prediction */
#if defined(CONFIG_BTB)
	lis	r0,BUCSR_ENABLE@h
	ori	r0,r0,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r0
#endif

#if defined(CONFIG_SYS_INIT_DBCR)
	lis	r1,0xffff
	ori	r1,r1,0xffff
	mtspr	DBSR,r1			/* Clear all status bits */
	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
	mtspr	DBCR0,r0
#endif

#ifdef CONFIG_MPC8569
#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)

	/* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow eLBC to
	 * use address space which is more than 12bits, and it must be done in
	 * the 4K boot page. So we set this bit here.
	 */

	/* create a temp mapping TLB0[0] for LBCR */
	lis	r6,FSL_BOOKE_MAS0(0, 0, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(0, 0, 0)@l

	lis	r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l

	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	isync
	msync
	tlbwe

	/* Set LBCR register */
	lis	r4,CONFIG_SYS_LBCR_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBCR_ADDR@l

	lis	r5,CONFIG_SYS_LBC_LBCR@h
	ori	r5,r5,CONFIG_SYS_LBC_LBCR@l
	stw	r5,0(r4)
	isync

	/* invalidate this temp TLB */
	lis	r4,CONFIG_SYS_LBC_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
	tlbivax	0,r4
	isync

#endif /* CONFIG_MPC8569 */

/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page.  That will ensure that any other
 * TLB we create won't interfere with it.  We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1.
 *
 * This is necessary, for example, when booting from the on-chip ROM,
 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 * If we don't shrink this TLB now, then we'll accidentally delete it
 * in "purge_old_ccsr_tlb" below.
 */
	bl	nexti		/* Find our address */
nexti:	mflr	r1		/* R1 = our PC */
	li	r2, 0
	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
	isync
	msync
	tlbsx	0, r1		/* This must succeed */

	/* Set the size of the TLB to 4KB */
	mfspr	r3, MAS1
	li	r2, 0xF00
	andc	r3, r3, r2	/* Clear the TSIZE bits */
	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
	mtspr	MAS1, r3

	/*
	 * Set the base address of the TLB to our PC.  We assume that
	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
	 */
	lis	r3, MAS2_EPN@h
	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */

	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */

	mfspr	r2, MAS2
	andc	r2, r2, r3
	or	r2, r2, r1
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	cmpwi	r27,0		/* r27 set earlier: 1 = erratum revision */
	beq	1f
	andi.	r15, r2, MAS2_I|MAS2_G	/* save the old I/G for later */
	rlwinm	r2, r2, 0, ~MAS2_I	/* erratum: run cacheable... */
	ori	r2, r2, MAS2_G		/* ...but guarded */
1:
#endif
	mtspr	MAS2, r2	/* Set the EPN to our PC base address */

	mfspr	r2, MAS3
	andc	r2, r2, r3
	or	r2, r2, r1
	mtspr	MAS3, r2	/* Set the RPN to our PC base address */

	isync
	msync
	tlbwe

/*
 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
 * location is not where we want it.  This typically happens on a 36-bit
 * system, where we want to move CCSR to near the top of 36-bit address space.
 *
 * To move CCSR, we create two temporary TLBs, one for the old location, and
 * another for the new location.  On CoreNet systems, we also need to create
 * a special, temporary LAW.
 *
 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 * long-term TLBs, so we use TLB0 here.
 */
#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)

#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW) must be defined."
#endif

purge_old_ccsr_tlb:
	/* r8 = new CCSR virtual address; r9 = old-CCSR helper mapping (r8+4K).
	 * Both registers stay reserved for the rest of the relocation code. */
	lis	r8, CONFIG_SYS_CCSRBAR@h
	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l

	/*
	 * In a multi-stage boot (e.g. NAND boot), a previous stage may have
	 * created a TLB for CCSR, which will interfere with our relocation
	 * code.  Since we're going to create a new TLB for CCSR anyway,
	 * it should be safe to delete this old TLB here.  We have to search
	 * for it, though.
	 */

	li	r1, 0
	mtspr	MAS6, r1	/* Search the current address space and PID */
	isync
	msync
	tlbsx	0, r8
	mfspr	r1, MAS1
	andis.	r2, r1, MAS1_VALID@h	/* Check for the Valid bit */
	beq	1f		/* Skip if no TLB found */

	rlwinm	r1, r1, 0, 1, 31	/* Clear Valid bit */
	mtspr	MAS1, r1
	isync
	msync
	tlbwe
1:

create_ccsr_new_tlb:
	/*
	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
	 */
	lis	r0, FSL_BOOKE_MAS0(0, 0, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
	lis	r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
#ifdef CONFIG_ENABLE_36BIT_PHYS
	lis	r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r7, r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS7, r7	/* upper physical address bits (36-bit mode) */
#endif
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	isync
	msync
	tlbwe

	/*
	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
	 */
create_ccsr_old_tlb:
	lis	r0, FSL_BOOKE_MAS0(0, 1, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@l
#ifdef CONFIG_ENABLE_36BIT_PHYS
	li	r7, 0	/* The default CCSR address is always a 32-bit number */
	mtspr	MAS7, r7
#endif
	mtspr	MAS0, r0
	/* MAS1 is the same as above */
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	isync
	msync
	tlbwe

	/*
	 * We have a TLB for what we think is the current (old) CCSR.  Let's
	 * verify that, otherwise we won't be able to move it.
	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
	 */
verify_old_ccsr:
	lis	r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
#ifdef CONFIG_FSL_CORENET
	lwz	r1, 4(r9)		/* CCSRBARL */
#else
	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
	slwi	r1, r1, 12
#endif

	cmpl	0, r0, r1

	/*
	 * If the value we read from CCSRBARL is not what we expect, then
	 * enter an infinite loop.  This will at least allow a debugger to
	 * halt execution and examine TLBs, etc.  There's no point in going
	 * on.
	 */
infinite_debug_loop:
	bne	infinite_debug_loop

#ifdef CONFIG_FSL_CORENET

#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_EN		0x80000000
#define LAW_SIZE_4K	0xb
#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
#define CCSRAR_C	0x80000000	/* Commit */

create_temp_law:
	/*
	 * On CoreNet systems, we create the temporary LAW using a special LAW
	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
	 */
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRBAR_LAWAR@h
	ori	r2, r2, CCSRBAR_LAWAR@l

	stw	r0, 0xc00(r9)	/* LAWBARH0 */
	stw	r1, 0xc04(r9)	/* LAWBARL0 */
	sync			/* base must be visible before enabling */
	stw	r2, 0xc08(r9)	/* LAWAR0 */

	/*
	 * Read back from LAWAR to ensure the update is complete.  e500mc
	 * cores also require an isync.
	 */
	lwz	r0, 0xc08(r9)	/* LAWAR0 */
	isync

	/*
	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
	 * Follow this with an isync instruction. This forces any outstanding
	 * accesses to configuration space to completion.
	 */
read_old_ccsrbar:
	lwz	r0, 0(r9)	/* CCSRBARH */
	lwz	r0, 4(r9)	/* CCSRBARL */
	isync

	/*
	 * Write the new values for CCSRBARH and CCSRBARL to their old
	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
	 * has a new value written it loads a CCSRBARH shadow register. When
	 * the CCSRBARL is written, the CCSRBARH shadow register contents
	 * along with the CCSRBARL value are loaded into the CCSRBARH and
	 * CCSRBARL registers, respectively.  Follow this with a sync
	 * instruction.
	 */
write_new_ccsrbar:
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRAR_C@h
	ori	r2, r2, CCSRAR_C@l

	stw	r0, 0(r9)	/* Write to CCSRBARH */
	sync			/* Make sure we write to CCSRBARH first */
	stw	r1, 4(r9)	/* Write to CCSRBARL */
	sync

	/*
	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
	 * Follow this with a sync instruction.
	 */
	stw	r2, 8(r9)
	sync

	/* Delete the temporary LAW (CCSR now responds at r8) */
delete_temp_law:
	li	r1, 0
	stw	r1, 0xc08(r8)
	sync
	stw	r1, 0xc00(r8)
	stw	r1, 0xc04(r8)
	sync

#else /* #ifdef CONFIG_FSL_CORENET */

write_new_ccsrbar:
	/*
	 * Read the current value of CCSRBAR using a load word instruction
	 * followed by an isync.  This forces all accesses to configuration
	 * space to complete.
	 */
	sync
	lwz	r0, 0(r9)
	isync

/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))

	/* Write the new value to CCSRBAR. */
	lis	r0, CCSRBAR_PHYS_RS12@h
	ori	r0, r0, CCSRBAR_PHYS_RS12@l
	stw	r0, 0(r9)
	sync

	/*
	 * The manual says to perform a load of an address that does not
	 * access configuration space or the on-chip SRAM using an existing TLB,
	 * but that doesn't appear to be necessary.  We will do the isync,
	 * though.
	 */
	isync

	/*
	 * Read the contents of CCSRBAR from its new location, followed by
	 * another isync.
	 */
	lwz	r0, 0(r8)
	isync

#endif /* #ifdef CONFIG_FSL_CORENET */

	/* Delete the temporary TLBs */
delete_temp_tlbs:
	lis	r0, FSL_BOOKE_MAS0(0, 0, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
	li	r1, 0			/* MAS1 = 0 -> entry invalid */
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	isync
	msync
	tlbwe

	lis	r0, FSL_BOOKE_MAS0(0, 1, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
	mtspr	MAS0, r0
	mtspr	MAS2, r2
	isync
	msync
	tlbwe
#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M	0x13
#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

	cmpwi	r27,0		/* r27 = 1 only on erratum-affected revisions */
	beq	9f

	/*
	 * Create a TLB entry for CCSR
	 *
	 * We're executing out of TLB1 entry in r14, and that's the only
	 * TLB entry that exists.  To allocate some TLB entries for our
	 * own use, flip a bit high enough that we won't flip it again
	 * via incrementing.
	 */

	xori	r8, r14, 32	/* r8 = a free TLB1 ESEL (r14 flipped bit 5) */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
	lis	r7, CONFIG_SYS_CCSRBAR@h	/* r7 = CCSR base, reused below */
	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
	ori	r2, r7, MAS2_I|MAS2_G
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* Map DCSR temporarily to physical address zero */
	li	r0, 0
	lis	r3, DCSRBAR_LAWAR@h
	ori	r3, r3, DCSRBAR_LAWAR@l

	stw	r0, 0xc00(r7)	/* LAWBARH0 */
	stw	r0, 0xc04(r7)	/* LAWBARL0 */
	sync
	stw	r3, 0xc08(r7)	/* LAWAR0 */

	/* Read back from LAWAR to ensure the update is complete. */
	lwz	r3, 0xc08(r7)	/* LAWAR0 */
	isync

	/* Create a TLB entry for DCSR at zero */

	addi	r9, r8, 1	/* next free TLB1 ESEL */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
	li	r6, 0	/* DCSR effective address */
	ori	r2, r6, MAS2_I|MAS2_G
	li	r3, MAS3_SW|MAS3_SR
	li	r4, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* enable the timebase */
#define CTBENR	0xe2084
	li	r3, 1
	addis	r4, r7, CTBENR@ha
	stw	r3, CTBENR@l(r4)
	lwz	r3, CTBENR@l(r4)
	twi	0,r3,0		/* TO=0 never traps; with isync, orders the load */
	isync

	/* Emit a CCSR register write via the locked-line helper below.
	 * r7 = CCSR base; r3/r4 become the address/value for erratum_set_value. */
	.macro	erratum_set_ccsr offset value
	addis	r3, r7, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	/* Same, but relative to the DCSR base in r6 */
	.macro	erratum_set_dcsr offset value
	addis	r3, r6, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	erratum_set_dcsr 0xb0e08 0xe0201800
	erratum_set_dcsr 0xb0e18 0xe0201800
	erratum_set_dcsr 0xb0e38 0xe0400000
	erratum_set_dcsr 0xb0008 0x00900000
	erratum_set_dcsr 0xb0e40 0xe00a0000
	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
	erratum_set_ccsr 0x10f00 0x415e5000
	erratum_set_ccsr 0x11f00 0x415e5000

	/* Make temp mapping uncacheable again, if it was initially
	 * (the old I/G bits were saved in r15 by the TLB-shrink code) */
	bl	2f
2:	mflr	r3
	tlbsx	0, r3		/* find the TLB entry covering our PC */
	mfspr	r4, MAS2
	rlwimi	r4, r15, 0, MAS2_I
	rlwimi	r4, r15, 0, MAS2_G
	mtspr	MAS2, r4
	isync
	tlbwe
	isync
	msync

	/* Clear the cache: flash-invalidate the I-cache... */
	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	/* ...wait for the invalidate bits to self-clear... */
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	bne	2b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	/* ...and wait for it to come back enabled */
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	beq	2b

	/* Remove temporary mappings: invalidate DCSR TLB entry (ESEL r9) */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	/* Disable the temporary DCSR LAW */
	li	r3, 0
	stw	r3, 0xc08(r7)	/* LAWAR0 */
	lwz	r3, 0xc08(r7)
	isync

	/* Invalidate the temporary CCSR TLB entry (ESEL r8) */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	b	9f

	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
erratum_set_value:
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL	/* clear unable-to-lock sticky bit */
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mflr	r12		/* preserve return address across the bl below */
	bl	5f
5:	mflr	r5
	addi	r5, r5, 2f - 5b	/* r5 = address of the locked code at 2f */
	icbtls	0, 0, r5	/* touch-and-lock first line */
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL	/* wait until the lock succeeded */
	bne	3b

	icbtls	0, 0, r5	/* touch-and-lock second line */
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6		/* place the critical code on a 64-byte line */
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h	/* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	stw	r4, 0(r3)	/* the actual erratum register write */

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h	/* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mtlr	r12
	blr

9:
#endif

create_init_ram_area:
	/* Long-lived mapping -> TLB1, entry 15 */
	lis	r6,FSL_BOOKE_MAS0(1, 15, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l

#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
	/* create a temp mapping in AS=1 to the 4M boot window */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@l

	/* The 85xx has the default boot window 0xff800000 - 0xffffffff */
	lis	r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	/* create a temp mapping in AS = 1 for Flash mapping
	 * created by PBL for ISBC code
	 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
#else
	/*
	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main
	 * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage.
	 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#endif

	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	isync
	msync
	tlbwe

	/* create a temp mapping in AS=1 to the stack (TLB1 entry 14) */
	lis	r6,FSL_BOOKE_MAS0(1, 14, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1, 14, 0)@l

	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@l

#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
	defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
	li	r10,CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH
	mtspr	MAS7,r10
#else
	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#endif

	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	isync
	msync
	tlbwe

	/*
	 * Switch to AS=1 via rfi: SRR0 = switch_as, SRR1 = new MSR.
	 * NOTE(review): @h/@l bind tighter than '|', so the lis picks up only
	 * MSR_DE@h; the ori still sets the intended low-half bits.  This is
	 * the long-standing upstream form — confirm before touching.
	 */
	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
	lis	r7,switch_as@h
	ori	r7,r7,switch_as@l

	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

switch_as:
/* L1 DCache is used for initial RAM */

	/* Allocate Initial RAM in data cache. */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	mfspr	r2, L1CFG0
	andi.	r2, r2, 0x1ff	/* L1CFG0[CSIZE]: cache size in KB */
	/* cache size * 1024 / (2 * L1 line size) */
	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
	mtctr	r2
	li	r0,0
1:
	dcbz	r0,r3		/* establish the line without a backing read */
	dcbtls	0,r0,r3		/* lock it into the D-cache */
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b

	/* Jump out the last 4K page and continue to 'normal' start */
#ifdef CONFIG_SYS_RAMBOOT
	b	_start_cont
#else
	/* Calculate absolute address in FLASH and jump there */
	/*--------------------------------------------------------------*/
	lis	r3,CONFIG_SYS_MONITOR_BASE@h
	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
	addi	r3,r3,_start_cont - _start + _START_OFFSET
	mtlr	r3
	blr
#endif

	.text
	.globl	_start
_start:
	.long	0x27051956	/* U-BOOT Magic Number */
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION_STRING, "\0"

	.align	4
	.globl	_start_cont
_start_cont:
	/* Setup the stack in initial RAM, could be L2-as-SRAM or L1 dcache */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
	li	r0,0
	stw	r0,0(r3)	/* Terminate Back Chain */
	stw	r0,+4(r3)	/* NULL return address. */
	mr	r1,r3		/* Transfer to SP(r1) */

	GET_GOT
	bl	cpu_init_early_f

	/* switch back to AS = 0 */
	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
	mtmsr	r3
	isync

	bl	cpu_init_f
	bl	board_init_f
	isync

	/* NOTREACHED - board_init_f() does not return */

#ifndef CONFIG_NAND_SPL
	. = EXC_OFF_SYS_RESET
	.globl	_start_of_vectors
_start_of_vectors:

/* Critical input. */
	/* NOTE(review): "CritcalInputException" (missing 'i') is the actual
	 * handler symbol name elsewhere in the tree — do not "fix" it here. */
	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

/* Machine check */
	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception.
*/ 1176 STD_EXCEPTION(0x0300, DataStorage, UnknownException) 1177 1178/* Instruction Storage exception. */ 1179 STD_EXCEPTION(0x0400, InstStorage, UnknownException) 1180 1181/* External Interrupt exception. */ 1182 STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException) 1183 1184/* Alignment exception. */ 1185 . = 0x0600 1186Alignment: 1187 EXCEPTION_PROLOG(SRR0, SRR1) 1188 mfspr r4,DAR 1189 stw r4,_DAR(r21) 1190 mfspr r5,DSISR 1191 stw r5,_DSISR(r21) 1192 addi r3,r1,STACK_FRAME_OVERHEAD 1193 EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE) 1194 1195/* Program check exception */ 1196 . = 0x0700 1197ProgramCheck: 1198 EXCEPTION_PROLOG(SRR0, SRR1) 1199 addi r3,r1,STACK_FRAME_OVERHEAD 1200 EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException, 1201 MSR_KERNEL, COPY_EE) 1202 1203 /* No FPU on MPC85xx. This exception is not supposed to happen. 1204 */ 1205 STD_EXCEPTION(0x0800, FPUnavailable, UnknownException) 1206 1207 . = 0x0900 1208/* 1209 * r0 - SYSCALL number 1210 * r3-... arguments 1211 */ 1212SystemCall: 1213 addis r11,r0,0 /* get functions table addr */ 1214 ori r11,r11,0 /* Note: this code is patched in trap_init */ 1215 addis r12,r0,0 /* get number of functions */ 1216 ori r12,r12,0 1217 1218 cmplw 0,r0,r12 1219 bge 1f 1220 1221 rlwinm r0,r0,2,0,31 /* fn_addr = fn_tbl[r0] */ 1222 add r11,r11,r0 1223 lwz r11,0(r11) 1224 1225 li r20,0xd00-4 /* Get stack pointer */ 1226 lwz r12,0(r20) 1227 subi r12,r12,12 /* Adjust stack pointer */ 1228 li r0,0xc00+_end_back-SystemCall 1229 cmplw 0,r0,r12 /* Check stack overflow */ 1230 bgt 1f 1231 stw r12,0(r20) 1232 1233 mflr r0 1234 stw r0,0(r12) 1235 mfspr r0,SRR0 1236 stw r0,4(r12) 1237 mfspr r0,SRR1 1238 stw r0,8(r12) 1239 1240 li r12,0xc00+_back-SystemCall 1241 mtlr r12 1242 mtspr SRR0,r11 1243 12441: SYNC 1245 rfi 1246_back: 1247 1248 mfmsr r11 /* Disable interrupts */ 1249 li r12,0 1250 ori r12,r12,MSR_EE 1251 andc r11,r11,r12 1252 SYNC /* Some chip revs need this... 
*/ 1253 mtmsr r11 1254 SYNC 1255 1256 li r12,0xd00-4 /* restore regs */ 1257 lwz r12,0(r12) 1258 1259 lwz r11,0(r12) 1260 mtlr r11 1261 lwz r11,4(r12) 1262 mtspr SRR0,r11 1263 lwz r11,8(r12) 1264 mtspr SRR1,r11 1265 1266 addi r12,r12,12 /* Adjust stack pointer */ 1267 li r20,0xd00-4 1268 stw r12,0(r20) 1269 1270 SYNC 1271 rfi 1272_end_back: 1273 1274 STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt) 1275 STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException) 1276 STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException) 1277 1278 STD_EXCEPTION(0x0d00, DataTLBError, UnknownException) 1279 STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException) 1280 1281 CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException ) 1282 1283 .globl _end_of_vectors 1284_end_of_vectors: 1285 1286 1287 . = . + (0x100 - ( . & 0xff )) /* align for debug */ 1288 1289/* 1290 * This code finishes saving the registers to the exception frame 1291 * and jumps to the appropriate handler for the exception. 1292 * Register r21 is pointer into trap frame, r1 has new stack pointer. 1293 */ 1294 .globl transfer_to_handler 1295transfer_to_handler: 1296 stw r22,_NIP(r21) 1297 lis r22,MSR_POW@h 1298 andc r23,r23,r22 1299 stw r23,_MSR(r21) 1300 SAVE_GPR(7, r21) 1301 SAVE_4GPRS(8, r21) 1302 SAVE_8GPRS(12, r21) 1303 SAVE_8GPRS(24, r21) 1304 1305 mflr r23 1306 andi. r24,r23,0x3f00 /* get vector offset */ 1307 stw r24,TRAP(r21) 1308 li r22,0 1309 stw r22,RESULT(r21) 1310 mtspr SPRG2,r22 /* r1 is now kernel sp */ 1311 1312 lwz r24,0(r23) /* virtual address of handler */ 1313 lwz r23,4(r23) /* where to go when done */ 1314 mtspr SRR0,r24 1315 mtspr SRR1,r20 1316 mtlr r23 1317 SYNC 1318 rfi /* jump to handler, enable MMU */ 1319 1320int_return: 1321 mfmsr r28 /* Disable interrupts */ 1322 li r4,0 1323 ori r4,r4,MSR_EE 1324 andc r28,r28,r4 1325 SYNC /* Some chip revs need this... 
*/ 1326 mtmsr r28 1327 SYNC 1328 lwz r2,_CTR(r1) 1329 lwz r0,_LINK(r1) 1330 mtctr r2 1331 mtlr r0 1332 lwz r2,_XER(r1) 1333 lwz r0,_CCR(r1) 1334 mtspr XER,r2 1335 mtcrf 0xFF,r0 1336 REST_10GPRS(3, r1) 1337 REST_10GPRS(13, r1) 1338 REST_8GPRS(23, r1) 1339 REST_GPR(31, r1) 1340 lwz r2,_NIP(r1) /* Restore environment */ 1341 lwz r0,_MSR(r1) 1342 mtspr SRR0,r2 1343 mtspr SRR1,r0 1344 lwz r0,GPR0(r1) 1345 lwz r2,GPR2(r1) 1346 lwz r1,GPR1(r1) 1347 SYNC 1348 rfi 1349 1350crit_return: 1351 mfmsr r28 /* Disable interrupts */ 1352 li r4,0 1353 ori r4,r4,MSR_EE 1354 andc r28,r28,r4 1355 SYNC /* Some chip revs need this... */ 1356 mtmsr r28 1357 SYNC 1358 lwz r2,_CTR(r1) 1359 lwz r0,_LINK(r1) 1360 mtctr r2 1361 mtlr r0 1362 lwz r2,_XER(r1) 1363 lwz r0,_CCR(r1) 1364 mtspr XER,r2 1365 mtcrf 0xFF,r0 1366 REST_10GPRS(3, r1) 1367 REST_10GPRS(13, r1) 1368 REST_8GPRS(23, r1) 1369 REST_GPR(31, r1) 1370 lwz r2,_NIP(r1) /* Restore environment */ 1371 lwz r0,_MSR(r1) 1372 mtspr SPRN_CSRR0,r2 1373 mtspr SPRN_CSRR1,r0 1374 lwz r0,GPR0(r1) 1375 lwz r2,GPR2(r1) 1376 lwz r1,GPR1(r1) 1377 SYNC 1378 rfci 1379 1380mck_return: 1381 mfmsr r28 /* Disable interrupts */ 1382 li r4,0 1383 ori r4,r4,MSR_EE 1384 andc r28,r28,r4 1385 SYNC /* Some chip revs need this... */ 1386 mtmsr r28 1387 SYNC 1388 lwz r2,_CTR(r1) 1389 lwz r0,_LINK(r1) 1390 mtctr r2 1391 mtlr r0 1392 lwz r2,_XER(r1) 1393 lwz r0,_CCR(r1) 1394 mtspr XER,r2 1395 mtcrf 0xFF,r0 1396 REST_10GPRS(3, r1) 1397 REST_10GPRS(13, r1) 1398 REST_8GPRS(23, r1) 1399 REST_GPR(31, r1) 1400 lwz r2,_NIP(r1) /* Restore environment */ 1401 lwz r0,_MSR(r1) 1402 mtspr SPRN_MCSRR0,r2 1403 mtspr SPRN_MCSRR1,r0 1404 lwz r0,GPR0(r1) 1405 lwz r2,GPR2(r1) 1406 lwz r1,GPR1(r1) 1407 SYNC 1408 rfmci 1409 1410/* Cache functions. 
*/

/* L1 cache control and simple register/IO accessor entry points. */

.globl flush_icache
flush_icache:
.globl invalidate_icache
invalidate_icache:
	/* Invalidate the entire L1 i-cache via L1CSR1[ICFI].
	 * flush == invalidate for the i-cache (it is never dirty). */
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI
	msync
	isync
	mtspr	L1CSR1,r0
	isync
	blr				/* entire I cache */

.globl invalidate_dcache
invalidate_dcache:
	/* Invalidate the entire L1 d-cache via L1CSR0[DCFI]. */
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	icache_enable
icache_enable:
	mflr	r8			/* bl below clobbers LR - save it */
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,0x0001		/* low bit: presumably L1CSR1_ICE
					 * (matches the masks used below) */
	oris	r4,r4,0x0001		/* bit 0x10000 as well -- see the
					 * e500 RM for its meaning */
	mtspr	L1CSR1,r4
	isync
	blr

	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3		/* clear the enable bit only */
	mtspr	L1CSR1,r0
	isync
	blr

	.globl	icache_status
icache_status:
	/* r3 = non-zero iff the i-cache is enabled */
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr

	.globl	dcache_enable
dcache_enable:
	mflr	r8			/* bl below clobbers LR - save it */
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,0x0001		/* low bit: presumably L1CSR0_DCE */
	oris	r0,r0,0x0001
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4		/* clear the enable bit only */
	mtspr	L1CSR0,r3
	isync
	blr

	.globl	dcache_status
dcache_status:
	/* r3 = non-zero iff the d-cache is enabled */
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr

	.globl get_pir
get_pir:
	mfspr	r3,PIR		/* processor ID register */
	blr

	.globl get_pvr
get_pvr:
	mfspr	r3,PVR		/* processor version register */
	blr

	.globl get_svr
get_svr:
	mfspr	r3,SVR		/* system version register */
	blr

	.globl wr_tcr
wr_tcr:
	mtspr	TCR,r3		/* write timer control register */
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in8							  */
/* Description:	 Input 8 bits (r3 = address; returns value in r3)	  */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out8							  */
/* Description:	 Output 8 bits (r3 = address, r4 = value)		  */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync			/* ensure the store reaches the device */
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16							  */
/* Description:	 Output 16 bits						  */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16r							  */
/* Description:	 Byte reverse and output 16 bits			  */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3	/* rA=0 form: effective address is (r3) */
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32							  */
/* Description:	 Output 32 bits						  */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32r							  */
/*
 Description:	Byte reverse and output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32r
out32r:
	stwbrx	r4,r0,r3	/* rA=0 form: effective address is (r3) */
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16							  */
/* Description:	 Input 16 bits						  */
/*------------------------------------------------------------------------------- */
	.globl	in16
in16:
	lhz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16r							  */
/* Description:	 Input 16 bits and byte reverse				  */
/*------------------------------------------------------------------------------- */
	.globl	in16r
in16r:
	lhbrx	r3,r0,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32							  */
/* Description:	 Input 32 bits						  */
/*------------------------------------------------------------------------------- */
	.globl	in32
in32:
	lwz	3,0x0000(3)	/* bare "3" == r3 in gas PPC syntax */
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32r							  */
/* Description:	 Input 32 bits and byte reverse				  */
/*------------------------------------------------------------------------------- */
	.globl	in32r
in32r:
	lwbrx	r3,r0,r3
	blr
#endif /* !CONFIG_NAND_SPL */

/*------------------------------------------------------------------------------*/

/*
 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
 *
 * Write one TLB entry from the MAS values passed in r3-r7.
 * Returns 0 in r3 (and clears MAS8 first under the hypervisor config).
 */
	.globl	write_tlb
write_tlb:
	mtspr	MAS0,r3
	mtspr	MAS1,r4
	mtspr	MAS2,r5
	mtspr	MAS3,r6
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	MAS7,r7
#endif
	li	r3,0
#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r3
#endif
	isync
	tlbwe
	msync
	isync
	blr

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,r3		/* Set new stack pointer */
	mr	r9,r4		/* Save copy of Init Data pointer */
	mr	r10,r5		/* Save copy of Destination Address */

	GET_GOT
	mr	r3,r5				/* Destination Address */
	lis	r4,CONFIG_SYS_MONITOR_BASE@h	/* Source Address */
	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
	lwz	r5,GOT(__init_end)
	sub	r5,r5,r4			/* length = __init_end - base */
	li	r6,CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size */

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4

	/* First our own GOT */
	add	r12,r12,r15
	/* then the one used by the C code */
	add	r30,r30,r15

	/*
	 * Now relocate code: word-by-word copy, direction chosen so an
	 * overlapping copy never clobbers unread source words.
	 */
	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2			/* r0 = word count (rounded up) */
	beq	cr1,4f			/* In place copy is not necessary */
	beq	7f			/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f			/* dest > src: copy backwards */

	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)		/* forward copy (dest below src) */
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2			/* backward copy from the top */
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0	/* round start down to a cache line */
	mr	r4,r3
5:	dcbst	0,r4		/* push copied words to memory... */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4		/* ...then invalidate stale i-cache lines */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus */
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */
	addi	r0,r10,in_ram - _start + _START_OFFSET

	/*
	 * As IVPR is going to point RAM address,
	 * Make sure IVOR15 has valid opcode to support debugger
	 */
	mtspr	IVOR15,r0

	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10

	mtlr	r0
	blr				/* NEVER RETURNS! */

	.globl	in_ram
in_ram:
	/*
	 * Relocation Function: r12 points to got2+0x8000.
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation offset */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* skip NULL entries */
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0			/* no fixups at all? */
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)		/* r4 = pointer to a fixup word */
	lwzux	r0,r4,r11		/* r0 = *(r4+r11); r4 updated to
					 * the relocated pointer */
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)		/* write relocated pointer back */
	beq-	5f			/* do not relocate NULL contents */
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end__)

	cmplw	0,r3,r4			/* empty BSS? */
	beq	6f

	li	r0,0
5:
	stw	r0,0(r3)
	addi	r3,r3,4
	cmplw	0,r3,r4
	bne	5b
6:

	mr	r3,r9		/* Init Data pointer */
	mr	r4,r10		/* Destination Address */
	bl	board_init_r

#ifndef CONFIG_NAND_SPL
	/*
	 * Copy exception vector code to low memory
	 *
	 * r3: dest_addr
	 * r7: source address, r8: end address, r9: target address
	 */
	.globl	trap_init
trap_init:
	mflr	r4			/* save link register */
	GET_GOT
	lwz	r7,GOT(_start_of_vectors)
	lwz	r8,GOT(_end_of_vectors)

	li	r9,0x100		/* reset vector always at 0x100 */

	cmplw	0,r7,r8
	bgelr				/* return if r7>=r8 - just in case */
1:
	lwz	r0,0(r7)		/* word-copy the vector table down */
	stw	r0,0(r9)
	addi	r7,r7,4
	addi	r9,r9,4
	cmplw	0,r7,r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 * (trap_reloc patches the descriptor words of each copied stub;
	 * r7 = offset of the stub's .L_ label within the copied table)
	 */
	li	r7,.L_CriticalInput - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_MachineCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_DataStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_InstStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ExtInterrupt - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Alignment - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ProgramCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_FPUnavailable - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Decrementer - _start + _START_OFFSET
	bl	trap_reloc
	/* remaining vectors are 0x100 apart - loop from IntervalTimer */
	li	r7,.L_IntervalTimer - _start + _START_OFFSET
	li	r8,_end_of_vectors - _start + _START_OFFSET
2:
	bl	trap_reloc
	addi	r7,r7,0x100		/* next exception vector */
	cmplw	0,r7,r8
	blt	2b

	/* Update IVORs as per relocated vector table address */
	li	r7,0x0100
	mtspr	IVOR0,r7	/* 0: Critical input */
	li	r7,0x0200
	mtspr	IVOR1,r7	/* 1: Machine check */
	li	r7,0x0300
	mtspr	IVOR2,r7	/* 2: Data storage */
	li	r7,0x0400
	mtspr	IVOR3,r7	/* 3: Instruction storage */
	li	r7,0x0500
	mtspr	IVOR4,r7	/* 4: External interrupt */
	li	r7,0x0600
	mtspr	IVOR5,r7	/* 5: Alignment */
	li	r7,0x0700
	mtspr	IVOR6,r7	/* 6: Program check */
	li	r7,0x0800
	mtspr	IVOR7,r7	/* 7: floating point unavailable */
	li	r7,0x0900
	mtspr	IVOR8,r7	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	li	r7,0x0a00
	mtspr	IVOR10,r7	/* 10: Decrementer */
	li	r7,0x0b00
	mtspr	IVOR11,r7	/* 11: Interval timer */
	li	r7,0x0c00
	mtspr	IVOR12,r7	/* 12: Watchdog timer */
	li	r7,0x0d00
	mtspr	IVOR13,r7	/* 13: Data TLB error */
	li	r7,0x0e00
	mtspr	IVOR14,r7	/* 14: Instruction TLB error */
	li	r7,0x0f00
	mtspr	IVOR15,r7	/* 15: Debug */

	/* Vector base back to address 0 (table now lives in low memory) */
	lis	r7,0x0
	mtspr	IVPR,r7

	mtlr	r4			/* restore link register */
	blr

/*
 * unlock_ram_in_cache: tear down the L1-dcache-as-initial-RAM region
 * set up in switch_as -- invalidate every locked line, then shoot down
 * the four 4K TLB pages covering the 16K init-RAM mapping.
 */
.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0
	andi.	r4,r4,0x1ff		/* cache size in KiB */
	/* same line count as the dcbz/dcbtls loop in switch_as */
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
	mtctr	r4
1:	dcbi	r0,r3			/* invalidate (discard) each line */
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b
	sync

	/* Invalidate the TLB entries for the cache */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr

/*
 * flush_dcache: flush the whole L1 d-cache by loading enough distinct
 * addresses to evict every line (with HID0[DCFA] forcing round-robin
 * victim allocation), then dcbf-ing the same range.
 */
.globl flush_dcache
flush_dcache:
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6	/* r7 = total number of loads */

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,0
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,0
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr

/*
 * setup_ivors: program the SoC-specific fixed IVOR values; the mtspr
 * sequence is generated into the included file at build time.
 */
.globl setup_ivors
setup_ivors:

#include "fixed_ivor.S"
	blr
#endif /* !CONFIG_NAND_SPL */