1/* 2 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc. 3 * Copyright (C) 2003 Motorola,Inc. 4 * 5 * See file CREDITS for list of people who contributed to this 6 * project. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License as 10 * published by the Free Software Foundation; either version 2 of 11 * the License, or (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 21 * MA 02111-1307 USA 22 */ 23 24/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards 25 * 26 * The processor starts at 0xfffffffc and the code is first executed in the 27 * last 4K page(0xfffff000-0xffffffff) in flash/rom. 
 *
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#define _LINUX_CONFIG_H 1       /* avoid reading Linux autoconf.h file */

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#undef  MSR_KERNEL
#define MSR_KERNEL ( MSR_ME )   /* Machine Check */

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
        START_GOT
        GOT_ENTRY(_GOT2_TABLE_)
        GOT_ENTRY(_FIXUP_TABLE_)

#ifndef CONFIG_NAND_SPL
        GOT_ENTRY(_start)
        GOT_ENTRY(_start_of_vectors)
        GOT_ENTRY(_end_of_vectors)
        GOT_ENTRY(transfer_to_handler)
#endif

        GOT_ENTRY(__init_end)
        GOT_ENTRY(__bss_end__)
        GOT_ENTRY(__bss_start)
        END_GOT

/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces'(e.g. the boot rom) to
 * continue the boot procedure.

 * Once the boot rom is mapped by TLB entries we can proceed
 * with normal startup.
 *
 */

        .section .bootpg,"ax"
        .globl _start_e500

_start_e500:
/* Enable debug exception */
        li      r1,MSR_DE
        mtmsr   r1

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
        /* Check the SVR revision byte against the affected revision(s) */
        mfspr   r3,SPRN_SVR
        rlwinm  r3,r3,0,0xff
        li      r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
        cmpw    r3,r4
        beq     1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
        li      r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
        cmpw    r3,r4
        beq     1f
#endif

        /* Not a supported revision affected by erratum */
        li      r27,0
        b       2f

1:      li      r27,1   /* Remember for later that we have the erratum */
        /* Erratum says set bits 55:60 to 001001 */
        msync
        isync
        mfspr   r3,976
        li      r4,0x48
        rlwimi  r3,r4,0,0x1f8
        mtspr   976,r3
        isync
2:
#endif

#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
        /* ISBC uses L2 as stack.
         * Disable L2 cache here so that u-boot can enable it later
         * as part of its normal flow
         */

        /* Check if L2 is enabled */
        mfspr   r3, SPRN_L2CSR0
        lis     r2, L2CSR0_L2E@h
        ori     r2, r2, L2CSR0_L2E@l
        and.    r4, r3, r2
        beq     l2_disabled

        mfspr   r3, SPRN_L2CSR0
        /* Flush L2 cache */
        lis     r2,(L2CSR0_L2FL)@h
        ori     r2, r2, (L2CSR0_L2FL)@l
        or      r3, r2, r3
        sync
        isync
        mtspr   SPRN_L2CSR0,r3
        isync
1:      /* poll until the flush bit self-clears */
        mfspr   r3, SPRN_L2CSR0
        and.    r1, r3, r2
        bne     1b

        /* Now clear the enable bit */
        mfspr   r3, SPRN_L2CSR0
        lis     r2, L2CSR0_L2E@h
        ori     r2, r2, L2CSR0_L2E@l
        andc    r4, r3, r2
        sync
        isync
        mtspr   SPRN_L2CSR0,r4
        isync

l2_disabled:
#endif

/* clear registers/arrays not reset by hardware */

        /* L1 */
        li      r0,2
        mtspr   L1CSR0,r0       /* invalidate d-cache */
        mtspr   L1CSR1,r0       /* invalidate i-cache */

        mfspr   r1,DBSR
        mtspr   DBSR,r1         /* Clear all valid bits (write-1-to-clear) */

        /*
         * Enable L1 Caches early
         *
         */

#ifdef CONFIG_SYS_CACHE_STASHING
        /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
        li      r2,(32 + 0)
        mtspr   L1CSR2,r2
#endif

        /* Enable/invalidate the I-Cache */
        lis     r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
        ori     r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
        mtspr   SPRN_L1CSR1,r2
1:      /* wait for the invalidate/flash-clear bits to self-clear */
        mfspr   r3,SPRN_L1CSR1
        and.    r1,r3,r2
        bne     1b

        lis     r3,(L1CSR1_CPE|L1CSR1_ICE)@h
        ori     r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
        mtspr   SPRN_L1CSR1,r3
        isync
2:      /* wait until the I-cache reports enabled */
        mfspr   r3,SPRN_L1CSR1
        andi.   r1,r3,L1CSR1_ICE@l
        beq     2b

        /* Enable/invalidate the D-Cache */
        lis     r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
        ori     r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
        mtspr   SPRN_L1CSR0,r2
1:      /* wait for the invalidate/flash-clear bits to self-clear */
        mfspr   r3,SPRN_L1CSR0
        and.    r1,r3,r2
        bne     1b

        lis     r3,(L1CSR0_CPE|L1CSR0_DCE)@h
        ori     r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
        mtspr   SPRN_L1CSR0,r3
        isync
2:      /* wait until the D-cache reports enabled */
        mfspr   r3,SPRN_L1CSR0
        andi.   r1,r3,L1CSR0_DCE@l
        beq     2b

        /*
         * Write a TLB1 entry (valid, IPROT set).
         * esel: entry select; ts: translation space; tsize: BOOKE_PAGESZ_*;
         * epn/rpn: effective/real page; wimg: MAS2 attribute bits;
         * perm: MAS3 permission bits; phy_high: upper physical address (MAS7);
         * scratch: GPR clobbered by the macro.
         */
        .macro create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
        lis     \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
        mtspr   MAS0, \scratch
        lis     \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
        mtspr   MAS1, \scratch
        lis     \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
        mtspr   MAS2, \scratch
        lis     \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
        mtspr   MAS3, \scratch
        lis     \scratch, \phy_high@h
        ori     \scratch, \scratch, \phy_high@l
        mtspr   MAS7, \scratch
        isync
        msync
        tlbwe
        isync
        .endm

        /* Same as create_tlb1_entry, but targets TLB0 and does not set IPROT */
        .macro create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
        lis     \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
        mtspr   MAS0, \scratch
        lis     \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
        mtspr   MAS1, \scratch
        lis     \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
        mtspr   MAS2, \scratch
        lis     \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
        mtspr   MAS3, \scratch
        lis     \scratch, \phy_high@h
        ori     \scratch, \scratch, \phy_high@l
        mtspr   MAS7, \scratch
        isync
        msync
        tlbwe
        isync
        .endm

        /* Invalidate TLB1 entry \esel by writing MAS1=0 (V bit clear) */
        .macro delete_tlb1_entry esel scratch
        lis     \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
        mtspr   MAS0, \scratch
        li      \scratch, 0
        mtspr   MAS1, \scratch
        isync
        msync
        tlbwe
        isync
        .endm

        /* Invalidate TLB0 entry \esel; TLB0 needs EPN in MAS2 to pick the set */
        .macro delete_tlb0_entry esel epn wimg scratch
        lis     \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
        mtspr   MAS0, \scratch
        li      \scratch, 0
        mtspr   MAS1, \scratch
        lis     \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
        ori     \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
        mtspr   MAS2, \scratch
        isync
        msync
        tlbwe
        isync
        .endm

/*
 * No need to setup interrupt vector for NAND SPL
 * because NAND SPL never compiles it.
 */
#if !defined(CONFIG_NAND_SPL)
        /* Setup interrupt vectors */
        lis     r1,CONFIG_SYS_MONITOR_BASE@h
        mtspr   IVPR,r1

        lis     r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
        ori     r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l

        addi    r4,r3,CriticalInput - _start + _START_OFFSET
        mtspr   IVOR0,r4        /* 0: Critical input */
        addi    r4,r3,MachineCheck - _start + _START_OFFSET
        mtspr   IVOR1,r4        /* 1: Machine check */
        addi    r4,r3,DataStorage - _start + _START_OFFSET
        mtspr   IVOR2,r4        /* 2: Data storage */
        addi    r4,r3,InstStorage - _start + _START_OFFSET
        mtspr   IVOR3,r4        /* 3: Instruction storage */
        addi    r4,r3,ExtInterrupt - _start + _START_OFFSET
        mtspr   IVOR4,r4        /* 4: External interrupt */
        addi    r4,r3,Alignment - _start + _START_OFFSET
        mtspr   IVOR5,r4        /* 5: Alignment */
        addi    r4,r3,ProgramCheck - _start + _START_OFFSET
        mtspr   IVOR6,r4        /* 6: Program check */
        addi    r4,r3,FPUnavailable - _start + _START_OFFSET
        mtspr   IVOR7,r4        /* 7: floating point unavailable */
        addi    r4,r3,SystemCall - _start + _START_OFFSET
        mtspr   IVOR8,r4        /* 8: System call */
        /* 9: Auxiliary processor unavailable(unsupported) */
        addi    r4,r3,Decrementer - _start + _START_OFFSET
        mtspr   IVOR10,r4       /* 10: Decrementer */
        addi    r4,r3,IntervalTimer - _start + _START_OFFSET
        mtspr   IVOR11,r4       /* 11: Interval timer */
        addi    r4,r3,WatchdogTimer - _start + _START_OFFSET
        mtspr   IVOR12,r4       /* 12: Watchdog timer */
        addi    r4,r3,DataTLBError - _start + _START_OFFSET
        mtspr   IVOR13,r4       /* 13: Data TLB error */
        addi    r4,r3,InstructionTLBError - _start + _START_OFFSET
        mtspr   IVOR14,r4
                                /* 14: Instruction TLB error */
        addi    r4,r3,DebugBreakpoint - _start + _START_OFFSET
        mtspr   IVOR15,r4       /* 15: Debug */
#endif

        /* Clear and set up some registers. */
        li      r0,0x0000
        lis     r1,0xffff
        mtspr   DEC,r0          /* prevent dec exceptions */
        mttbl   r0              /* prevent fit & wdt exceptions */
        mttbu   r0
        mtspr   TSR,r1          /* clear all timer exception status */
        mtspr   TCR,r0          /* disable all */
        mtspr   ESR,r0          /* clear exception syndrome register */
        mtspr   MCSR,r0         /* machine check syndrome register */
        mtxer   r0              /* clear integer exception register */

#ifdef CONFIG_SYS_BOOK3E_HV
        mtspr   MAS8,r0         /* make sure MAS8 is clear */
#endif

        /* Enable Time Base and Select Time Base Clock */
        lis     r0,HID0_EMCP@h          /* Enable machine check */
#if defined(CONFIG_ENABLE_36BIT_PHYS)
        ori     r0,r0,HID0_ENMAS7@l     /* Enable MAS7 */
#endif
#ifndef CONFIG_E500MC
        ori     r0,r0,HID0_TBEN@l       /* Enable Timebase */
#endif
        mtspr   HID0,r0

#ifndef CONFIG_E500MC
        li      r0,(HID1_ASTME|HID1_ABE)@l      /* Addr streaming & broadcast */
        mfspr   r3,PVR
        andi.   r3,r3, 0xff
        cmpwi   r3,0x50@l       /* if we are rev 5.0 or greater set MBDD */
        blt     1f
        /* Set MBDD bit also */
        ori     r0, r0, HID1_MBDD@l
1:
        mtspr   HID1,r0
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
        /* SPR 977: erratum workaround, set bit via oris */
        mfspr   r3,977
        oris    r3,r3,0x0100
        mtspr   977,r3
#endif

        /* Enable Branch Prediction */
#if defined(CONFIG_BTB)
        lis     r0,BUCSR_ENABLE@h
        ori     r0,r0,BUCSR_ENABLE@l
        mtspr   SPRN_BUCSR,r0
#endif

#if defined(CONFIG_SYS_INIT_DBCR)
        lis     r1,0xffff
        ori     r1,r1,0xffff
        mtspr   DBSR,r1         /* Clear all status bits */
        lis     r0,CONFIG_SYS_INIT_DBCR@h       /* DBCR0[IDM] must be set */
        ori     r0,r0,CONFIG_SYS_INIT_DBCR@l
        mtspr   DBCR0,r0
#endif

#ifdef CONFIG_MPC8569
#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)

        /* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow elBC to
         * use address space which is more than 12bits, and it must be done in
         * the 4K boot page. So we set this bit here.
         */

        /* create a temp mapping TLB0[0] for LBCR */
        create_tlb0_entry 0, \
                0, BOOKE_PAGESZ_4K, \
                CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
                CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
                0, r6

        /* Set LBCR register */
        lis     r4,CONFIG_SYS_LBCR_ADDR@h
        ori     r4,r4,CONFIG_SYS_LBCR_ADDR@l

        lis     r5,CONFIG_SYS_LBC_LBCR@h
        ori     r5,r5,CONFIG_SYS_LBC_LBCR@l
        stw     r5,0(r4)
        isync

        /* invalidate this temp TLB */
        lis     r4,CONFIG_SYS_LBC_ADDR@h
        ori     r4,r4,CONFIG_SYS_LBC_ADDR@l
        tlbivax 0,r4
        isync

#endif /* CONFIG_MPC8569 */

/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page.  That will ensure that any other
 * TLB we create won't interfere with it.  We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1.  We also assume
 * it is in TLB1.
 *
 * This is necessary, for example, when booting from the on-chip ROM,
 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 */
        bl      nexti           /* Find our address */
nexti:  mflr    r1              /* R1 = our PC */
        li      r2, 0
        mtspr   MAS6, r2        /* Assume the current PID and AS are 0 */
        isync
        msync
        tlbsx   0, r1           /* This must succeed */

        mfspr   r14, MAS0       /* Save ESEL for later */
        rlwinm  r14, r14, 16, 0xfff

        /* Set the size of the TLB to 4KB */
        mfspr   r3, MAS1
        li      r2, 0xF00
        andc    r3, r3, r2      /* Clear the TSIZE bits */
        ori     r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
        oris    r3, r3, MAS1_IPROT@h
        mtspr   MAS1, r3

        /*
         * Set the base address of the TLB to our PC.  We assume that
         * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
         */
        lis     r3, MAS2_EPN@h
        ori     r3, r3, MAS2_EPN@l      /* R3 = MAS2_EPN */

        and     r1, r1, r3      /* Our PC, rounded down to the nearest page */

        mfspr   r2, MAS2
        andc    r2, r2, r3
        or      r2, r2, r1
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
        cmpwi   r27,0
        beq     1f
        andi.   r15, r2, MAS2_I|MAS2_G  /* save the old I/G for later */
        rlwinm  r2, r2, 0, ~MAS2_I
        ori     r2, r2, MAS2_G
1:
#endif
        mtspr   MAS2, r2        /* Set the EPN to our PC base address */

        mfspr   r2, MAS3
        andc    r2, r2, r3
        or      r2, r2, r1
        mtspr   MAS3, r2        /* Set the RPN to our PC base address */

        isync
        msync
        tlbwe

/*
 * Clear out any other TLB entries that may exist, to avoid conflicts.
 * Our TLB entry is in r14.
 */
        li      r0, TLBIVAX_ALL | TLBIVAX_TLB0
        tlbivax 0, r0
        tlbsync

        /* Walk every TLB1 entry and invalidate all but our own (r14) */
        mfspr   r4, SPRN_TLB1CFG
        rlwinm  r4, r4, 0, TLBnCFG_NENTRY_MASK

        li      r3, 0
        mtspr   MAS1, r3
1:      cmpw    r3, r14
        rlwinm  r5, r3, 16, MAS0_ESEL_MSK
        addi    r3, r3, 1
        beq     2f              /* skip the entry we're executing from */

        oris    r5, r5, MAS0_TLBSEL(1)@h
        mtspr   MAS0, r5

        isync
        tlbwe                   /* MAS1 is still 0 => invalidate */
        isync
        msync

2:      cmpw    r3, r4
        blt     1b

#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL)
/*
 * TLB entry for debugging in AS1
 * Create temporary TLB entry in AS0 to handle debug exception
 * As on debug exception MSR is cleared i.e. Address space is changed
 * to 0. A TLB entry (in AS0) is required to handle debug exception generated
 * in AS1.
 */

#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
/*
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because flash's virtual address maps to 0xff800000 - 0xffffffff.
 * and this window is outside of 4K boot window.
 */
        create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
                0, BOOKE_PAGESZ_4M, \
                CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
                0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6

#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
        create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
                0, BOOKE_PAGESZ_1M, \
                CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \
                CONFIG_SYS_PBI_FLASH_WINDOW, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6
#else
/*
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because "nexti" will resize TLB to 4K
 */
        create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
                0, BOOKE_PAGESZ_256K, \
                CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
                CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6
#endif
#endif

/*
 * Relocate CCSR, if necessary.
 * We relocate CCSR if (obviously) the default
 * location is not where we want it.  This typically happens on a 36-bit
 * system, where we want to move CCSR to near the top of 36-bit address space.
 *
 * To move CCSR, we create two temporary TLBs, one for the old location, and
 * another for the new location.  On CoreNet systems, we also need to create
 * a special, temporary LAW.
 *
 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 * long-term TLBs, so we use TLB0 here.
 */
#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)

#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW) must be defined."
#endif

create_ccsr_new_tlb:
        /*
         * Create a TLB for the new location of CCSR.  Register R8 is reserved
         * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
         */
        lis     r8, CONFIG_SYS_CCSRBAR@h
        ori     r8, r8, CONFIG_SYS_CCSRBAR@l
        lis     r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
        ori     r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
        create_tlb0_entry 0, \
                0, BOOKE_PAGESZ_4K, \
                CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
                CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
                CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
        /*
         * Create a TLB for the current location of CCSR.  Register R9 is reserved
         * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
         */
create_ccsr_old_tlb:
        create_tlb0_entry 1, \
                0, BOOKE_PAGESZ_4K, \
                CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
                CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
                0, r3   /* The default CCSR address is always a 32-bit number */


        /*
         * We have a TLB for what we think is the current (old) CCSR.  Let's
         * verify that, otherwise we won't be able to move it.
         * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
         * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
         */
verify_old_ccsr:
        lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
        ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
#ifdef CONFIG_FSL_CORENET
        lwz     r1, 4(r9)               /* CCSRBARL */
#else
        lwz     r1, 0(r9)               /* CCSRBAR, shifted right by 12 */
        slwi    r1, r1, 12
#endif

        cmpl    0, r0, r1

        /*
         * If the value we read from CCSRBARL is not what we expect, then
         * enter an infinite loop.  This will at least allow a debugger to
         * halt execution and examine TLBs, etc.  There's no point in going
         * on.
         */
infinite_debug_loop:
        bne     infinite_debug_loop

#ifdef CONFIG_FSL_CORENET

#define CCSR_LAWBARH0   (CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_EN          0x80000000
#define LAW_SIZE_4K     0xb
#define CCSRBAR_LAWAR   (LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
#define CCSRAR_C        0x80000000      /* Commit */

create_temp_law:
        /*
         * On CoreNet systems, we create the temporary LAW using a special LAW
         * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
         */
        lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
        ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
        lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
        ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
        lis     r2, CCSRBAR_LAWAR@h
        ori     r2, r2, CCSRBAR_LAWAR@l

        stw     r0, 0xc00(r9)   /* LAWBARH0 */
        stw     r1, 0xc04(r9)   /* LAWBARL0 */
        sync
        stw     r2, 0xc08(r9)   /* LAWAR0 */

        /*
         * Read back from LAWAR to ensure the update is complete.  e500mc
         * cores also require an isync.
         */
        lwz     r0, 0xc08(r9)   /* LAWAR0 */
        isync

        /*
         * Read the current CCSRBARH and CCSRBARL using load word instructions.
         * Follow this with an isync instruction. This forces any outstanding
         * accesses to configuration space to completion.
         */
read_old_ccsrbar:
        lwz     r0, 0(r9)       /* CCSRBARH */
        lwz     r0, 4(r9)       /* CCSRBARL */
        isync

        /*
         * Write the new values for CCSRBARH and CCSRBARL to their old
         * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
         * has a new value written it loads a CCSRBARH shadow register. When
         * the CCSRBARL is written, the CCSRBARH shadow register contents
         * along with the CCSRBARL value are loaded into the CCSRBARH and
         * CCSRBARL registers, respectively.  Follow this with a sync
         * instruction.
         */
write_new_ccsrbar:
        lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
        ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
        lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
        ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
        lis     r2, CCSRAR_C@h
        ori     r2, r2, CCSRAR_C@l

        stw     r0, 0(r9)       /* Write to CCSRBARH */
        sync                    /* Make sure we write to CCSRBARH first */
        stw     r1, 4(r9)       /* Write to CCSRBARL */
        sync

        /*
         * Write a 1 to the commit bit (C) of CCSRAR at the old location.
         * Follow this with a sync instruction.
         */
        stw     r2, 8(r9)
        sync

        /* Delete the temporary LAW */
delete_temp_law:
        li      r1, 0
        stw     r1, 0xc08(r8)
        sync
        stw     r1, 0xc00(r8)
        stw     r1, 0xc04(r8)
        sync

#else /* #ifdef CONFIG_FSL_CORENET */

write_new_ccsrbar:
        /*
         * Read the current value of CCSRBAR using a load word instruction
         * followed by an isync.  This forces all accesses to configuration
         * space to complete.
         */
        sync
        lwz     r0, 0(r9)
        isync

/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
                           (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))

        /* Write the new value to CCSRBAR.
         */
        lis     r0, CCSRBAR_PHYS_RS12@h
        ori     r0, r0, CCSRBAR_PHYS_RS12@l
        stw     r0, 0(r9)
        sync

        /*
         * The manual says to perform a load of an address that does not
         * access configuration space or the on-chip SRAM using an existing TLB,
         * but that doesn't appear to be necessary.  We will do the isync,
         * though.
         */
        isync

        /*
         * Read the contents of CCSRBAR from its new location, followed by
         * another isync.
         */
        lwz     r0, 0(r8)
        isync

#endif  /* #ifdef CONFIG_FSL_CORENET */

        /* Delete the temporary TLBs */
delete_temp_tlbs:
        delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
        delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3

#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */

#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
create_ccsr_l2_tlb:
        /*
         * Create a TLB for the MMR location of CCSR
         * to access L2CSR0 register
         */
        create_tlb0_entry 0, \
                0, BOOKE_PAGESZ_4K, \
                CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
                CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
                CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3

enable_l2_cluster_l2:
        /* enable L2 cache */
        lis     r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
        ori     r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
        li      r4, 33          /* stash id */
        stw     r4, 4(r3)
        lis     r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
        ori     r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
        sync
        stw     r4, 0(r3)       /* invalidate L2 */
1:      sync
        lwz     r0, 0(r3)
        twi     0, r0, 0        /* order the load before the isync */
        isync
        and.    r1, r0, r4      /* wait for invalidate/lock-flash-clear done */
        bne     1b
        lis     r4, L2CSR0_L2E@h
        sync
        stw     r4, 0(r3)       /* enable L2 */
delete_ccsr_l2_tlb:
        delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0   (CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M     0x13
#define DCSRBAR_LAWAR   (LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

        cmpwi   r27,0           /* r27 != 0 => affected revision (set earlier) */
        beq     9f

        /*
         * Create a TLB entry for CCSR
         *
         * We're executing out of TLB1 entry in r14, and that's the only
         * TLB entry that exists.  To allocate some TLB entries for our
         * own use, flip a bit high enough that we won't flip it again
         * via incrementing.
         */

        xori    r8, r14, 32
        lis     r0, MAS0_TLBSEL(1)@h
        rlwimi  r0, r8, 16, MAS0_ESEL_MSK
        lis     r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
        ori     r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
        lis     r7, CONFIG_SYS_CCSRBAR@h
        ori     r7, r7, CONFIG_SYS_CCSRBAR@l
        ori     r2, r7, MAS2_I|MAS2_G
        lis     r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
        ori     r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
        lis     r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
        ori     r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
        mtspr   MAS0, r0
        mtspr   MAS1, r1
        mtspr   MAS2, r2
        mtspr   MAS3, r3
        mtspr   MAS7, r4
        isync
        tlbwe
        isync
        msync

        /* Map DCSR temporarily to physical address zero */
        li      r0, 0
        lis     r3, DCSRBAR_LAWAR@h
        ori     r3, r3, DCSRBAR_LAWAR@l

        stw     r0, 0xc00(r7)   /* LAWBARH0 */
        stw     r0, 0xc04(r7)   /* LAWBARL0 */
        sync
        stw     r3, 0xc08(r7)   /* LAWAR0 */

        /* Read back from LAWAR to ensure the update is complete.
         */
        lwz     r3, 0xc08(r7)   /* LAWAR0 */
        isync

        /* Create a TLB entry for DCSR at zero */

        addi    r9, r8, 1
        lis     r0, MAS0_TLBSEL(1)@h
        rlwimi  r0, r9, 16, MAS0_ESEL_MSK
        lis     r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
        ori     r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
        li      r6, 0           /* DCSR effective address */
        ori     r2, r6, MAS2_I|MAS2_G
        li      r3, MAS3_SW|MAS3_SR
        li      r4, 0
        mtspr   MAS0, r0
        mtspr   MAS1, r1
        mtspr   MAS2, r2
        mtspr   MAS3, r3
        mtspr   MAS7, r4
        isync
        tlbwe
        isync
        msync

        /* enable the timebase */
#define CTBENR  0xe2084
        li      r3, 1
        addis   r4, r7, CTBENR@ha
        stw     r3, CTBENR@l(r4)
        lwz     r3, CTBENR@l(r4)
        twi     0,r3,0          /* order the load before the isync */
        isync

        /* Load r3/r4 with a CCSR offset/value pair and call erratum_set_value.
         * r7 = CCSR base (set above). */
        .macro erratum_set_ccsr offset value
        addis   r3, r7, \offset@ha
        lis     r4, \value@h
        addi    r3, r3, \offset@l
        ori     r4, r4, \value@l
        bl      erratum_set_value
        .endm

        /* Same, relative to the DCSR base in r6 (mapped at 0 above). */
        .macro erratum_set_dcsr offset value
        addis   r3, r6, \offset@ha
        lis     r4, \value@h
        addi    r3, r3, \offset@l
        ori     r4, r4, \value@l
        bl      erratum_set_value
        .endm

        erratum_set_dcsr 0xb0e08 0xe0201800
        erratum_set_dcsr 0xb0e18 0xe0201800
        erratum_set_dcsr 0xb0e38 0xe0400000
        erratum_set_dcsr 0xb0008 0x00900000
        erratum_set_dcsr 0xb0e40 0xe00a0000
        erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
        erratum_set_ccsr 0x10f00 0x415e5000
        erratum_set_ccsr 0x11f00 0x415e5000

        /* Make temp mapping uncacheable again, if it was initially */
        bl      2f
2:      mflr    r3
        tlbsx   0, r3
        mfspr   r4, MAS2
        rlwimi  r4, r15, 0, MAS2_I      /* restore I/G saved in r15 earlier */
        rlwimi  r4, r15, 0, MAS2_G
        mtspr   MAS2, r4
        isync
        tlbwe
        isync
        msync

        /* Clear the cache */
        lis     r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
        ori     r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
        sync
        isync
        mtspr   SPRN_L1CSR1,r3
        isync
2:      sync
        mfspr   r4,SPRN_L1CSR1
        and.    r4,r4,r3
        bne     2b

        lis     r3,(L1CSR1_CPE|L1CSR1_ICE)@h
        ori     r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
        sync
        isync
        mtspr   SPRN_L1CSR1,r3
        isync
2:      sync
        mfspr   r4,SPRN_L1CSR1
        and.    r4,r4,r3
        beq     2b

        /* Remove temporary mappings */
        lis     r0, MAS0_TLBSEL(1)@h
        rlwimi  r0, r9, 16, MAS0_ESEL_MSK
        li      r3, 0
        mtspr   MAS0, r0
        mtspr   MAS1, r3        /* V=0 => invalidate the DCSR entry */
        isync
        tlbwe
        isync
        msync

        li      r3, 0
        stw     r3, 0xc08(r7)   /* LAWAR0 */
        lwz     r3, 0xc08(r7)
        isync

        lis     r0, MAS0_TLBSEL(1)@h
        rlwimi  r0, r8, 16, MAS0_ESEL_MSK
        li      r3, 0
        mtspr   MAS0, r0
        mtspr   MAS1, r3        /* invalidate the temporary CCSR entry */
        isync
        tlbwe
        isync
        msync

        b       9f

        /* r3 = addr, r4 = value, clobbers r5, r11, r12 */
erratum_set_value:
        /* Lock two cache lines into I-Cache */
        sync
        mfspr   r11, SPRN_L1CSR1
        rlwinm  r11, r11, 0, ~L1CSR1_ICUL
        sync
        isync
        mtspr   SPRN_L1CSR1, r11
        isync

        mflr    r12
        bl      5f
5:      mflr    r5
        addi    r5, r5, 2f - 5b
        icbtls  0, 0, r5
        addi    r5, r5, 64

        sync
        mfspr   r11, SPRN_L1CSR1
3:      andi.   r11, r11, L1CSR1_ICUL
        bne     3b

        icbtls  0, 0, r5
        addi    r5, r5, 64

        sync
        mfspr   r11, SPRN_L1CSR1
3:      andi.   r11, r11, L1CSR1_ICUL
        bne     3b

        b       2f
        .align  6
        /* Inside a locked cacheline, wait a while, write, then wait a while */
2:      sync

        mfspr   r5, SPRN_TBRL
        addis   r11, r5, 0x10000@h      /* wait 65536 timebase ticks */
4:      mfspr   r5, SPRN_TBRL
        subf.   r5, r5, r11
        bgt     4b

        stw     r4, 0(r3)

        mfspr   r5, SPRN_TBRL
        addis   r11, r5, 0x10000@h      /* wait 65536 timebase ticks */
4:      mfspr   r5, SPRN_TBRL
        subf.   r5, r5, r11
        bgt     4b

        sync

        /*
         * Fill out the rest of this cache line and the next with nops,
         * to ensure that nothing outside the locked area will be
         * fetched due to a branch.
         */
        .rept   19
        nop
        .endr

        /* Unlock the I-cache lines again */
        sync
        mfspr   r11, SPRN_L1CSR1
        rlwinm  r11, r11, 0, ~L1CSR1_ICUL
        sync
        isync
        mtspr   SPRN_L1CSR1, r11
        isync

        mtlr    r12
        blr

9:
#endif

create_init_ram_area:
        lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
        ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l

#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
        /* create a temp mapping in AS=1 to the 4M boot window */
        create_tlb1_entry 15, \
                1, BOOKE_PAGESZ_4M, \
                CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
                0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6

#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
        /* create a temp mapping in AS = 1 for Flash mapping
         * created by PBL for ISBC code
         */
        create_tlb1_entry 15, \
                1, BOOKE_PAGESZ_1M, \
                CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \
                CONFIG_SYS_PBI_FLASH_WINDOW, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6
#else
        /*
         * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main
         * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage.
         */
        create_tlb1_entry 15, \
                1, BOOKE_PAGESZ_1M, \
                CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \
                CONFIG_SYS_MONITOR_BASE, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6
#endif

        /* create a temp mapping in AS=1 to the stack */
#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
        create_tlb1_entry 14, \
                1, BOOKE_PAGESZ_16K, \
                CONFIG_SYS_INIT_RAM_ADDR, 0, \
                CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
                CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6

#else
        create_tlb1_entry 14, \
                1, BOOKE_PAGESZ_16K, \
                CONFIG_SYS_INIT_RAM_ADDR, 0, \
                CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
                0, r6
#endif

        /* Switch to AS=1 (MSR[IS|DS]) via rfi to switch_as */
        lis     r6,MSR_IS|MSR_DS|MSR_DE@h
        ori     r6,r6,MSR_IS|MSR_DS|MSR_DE@l
        lis     r7,switch_as@h
        ori     r7,r7,switch_as@l

        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r6
        rfi

switch_as:
/* L1 DCache is used for initial RAM */

        /* Allocate Initial RAM in data cache.
         */
        lis     r3,CONFIG_SYS_INIT_RAM_ADDR@h
        ori     r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
        mfspr   r2, L1CFG0
        andi.   r2, r2, 0x1ff
        /* cache size * 1024 / (2 * L1 line size) */
        slwi    r2, r2, (10 - 1 - L1_CACHE_SHIFT)
        mtctr   r2
        li      r0,0
1:
        dcbz    r0,r3           /* establish the line without a memory read */
        dcbtls  0,r0,r3         /* and lock it in the D-cache */
        addi    r3,r3,CONFIG_SYS_CACHELINE_SIZE
        bdnz    1b

        /* Jump out the last 4K page and continue to 'normal' start */
#ifdef CONFIG_SYS_RAMBOOT
        b       _start_cont
#else
        /* Calculate absolute address in FLASH and jump there */
        /*--------------------------------------------------------------*/
        lis     r3,CONFIG_SYS_MONITOR_BASE@h
        ori     r3,r3,CONFIG_SYS_MONITOR_BASE@l
        addi    r3,r3,_start_cont - _start + _START_OFFSET
        mtlr    r3
        blr
#endif

        .text
        .globl  _start
_start:
        .long   0x27051956      /* U-BOOT Magic Number */
        .globl  version_string
version_string:
        .ascii U_BOOT_VERSION_STRING, "\0"

        .align  4
        .globl  _start_cont
_start_cont:
        /* Setup the stack in initial RAM, could be L2-as-SRAM or L1 dcache */
        lis     r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
        ori     r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l   /* Align to 16 */
        li      r0,0
        stw     r0,0(r3)        /* Terminate Back Chain */
        stw     r0,+4(r3)       /* NULL return address. */
        mr      r1,r3           /* Transfer to SP(r1) */

        GET_GOT
        bl      cpu_init_early_f

        /* switch back to AS = 0 */
        lis     r3,(MSR_CE|MSR_ME|MSR_DE)@h
        ori     r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
        mtmsr   r3
        isync

        bl      cpu_init_f
        bl      board_init_f
        isync

        /* NOTREACHED - board_init_f() does not return */

#ifndef CONFIG_NAND_SPL
        . = EXC_OFF_SYS_RESET
        .globl  _start_of_vectors
_start_of_vectors:

/* Critical input. */
        CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

/* Machine check */
        MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
        STD_EXCEPTION(0x0300, DataStorage, UnknownException)

/* Instruction Storage exception.
*/ 1171 STD_EXCEPTION(0x0400, InstStorage, UnknownException) 1172 1173/* External Interrupt exception. */ 1174 STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException) 1175 1176/* Alignment exception. */ 1177 . = 0x0600 1178Alignment: 1179 EXCEPTION_PROLOG(SRR0, SRR1) 1180 mfspr r4,DAR 1181 stw r4,_DAR(r21) 1182 mfspr r5,DSISR 1183 stw r5,_DSISR(r21) 1184 addi r3,r1,STACK_FRAME_OVERHEAD 1185 EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE) 1186 1187/* Program check exception */ 1188 . = 0x0700 1189ProgramCheck: 1190 EXCEPTION_PROLOG(SRR0, SRR1) 1191 addi r3,r1,STACK_FRAME_OVERHEAD 1192 EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException, 1193 MSR_KERNEL, COPY_EE) 1194 1195 /* No FPU on MPC85xx. This exception is not supposed to happen. 1196 */ 1197 STD_EXCEPTION(0x0800, FPUnavailable, UnknownException) 1198 1199 . = 0x0900 1200/* 1201 * r0 - SYSCALL number 1202 * r3-... arguments 1203 */ 1204SystemCall: 1205 addis r11,r0,0 /* get functions table addr */ 1206 ori r11,r11,0 /* Note: this code is patched in trap_init */ 1207 addis r12,r0,0 /* get number of functions */ 1208 ori r12,r12,0 1209 1210 cmplw 0,r0,r12 1211 bge 1f 1212 1213 rlwinm r0,r0,2,0,31 /* fn_addr = fn_tbl[r0] */ 1214 add r11,r11,r0 1215 lwz r11,0(r11) 1216 1217 li r20,0xd00-4 /* Get stack pointer */ 1218 lwz r12,0(r20) 1219 subi r12,r12,12 /* Adjust stack pointer */ 1220 li r0,0xc00+_end_back-SystemCall 1221 cmplw 0,r0,r12 /* Check stack overflow */ 1222 bgt 1f 1223 stw r12,0(r20) 1224 1225 mflr r0 1226 stw r0,0(r12) 1227 mfspr r0,SRR0 1228 stw r0,4(r12) 1229 mfspr r0,SRR1 1230 stw r0,8(r12) 1231 1232 li r12,0xc00+_back-SystemCall 1233 mtlr r12 1234 mtspr SRR0,r11 1235 12361: SYNC 1237 rfi 1238_back: 1239 1240 mfmsr r11 /* Disable interrupts */ 1241 li r12,0 1242 ori r12,r12,MSR_EE 1243 andc r11,r11,r12 1244 SYNC /* Some chip revs need this... 
*/ 1245 mtmsr r11 1246 SYNC 1247 1248 li r12,0xd00-4 /* restore regs */ 1249 lwz r12,0(r12) 1250 1251 lwz r11,0(r12) 1252 mtlr r11 1253 lwz r11,4(r12) 1254 mtspr SRR0,r11 1255 lwz r11,8(r12) 1256 mtspr SRR1,r11 1257 1258 addi r12,r12,12 /* Adjust stack pointer */ 1259 li r20,0xd00-4 1260 stw r12,0(r20) 1261 1262 SYNC 1263 rfi 1264_end_back: 1265 1266 STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt) 1267 STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException) 1268 STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException) 1269 1270 STD_EXCEPTION(0x0d00, DataTLBError, UnknownException) 1271 STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException) 1272 1273 CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException ) 1274 1275 .globl _end_of_vectors 1276_end_of_vectors: 1277 1278 1279 . = . + (0x100 - ( . & 0xff )) /* align for debug */ 1280 1281/* 1282 * This code finishes saving the registers to the exception frame 1283 * and jumps to the appropriate handler for the exception. 1284 * Register r21 is pointer into trap frame, r1 has new stack pointer. 1285 */ 1286 .globl transfer_to_handler 1287transfer_to_handler: 1288 stw r22,_NIP(r21) 1289 lis r22,MSR_POW@h 1290 andc r23,r23,r22 1291 stw r23,_MSR(r21) 1292 SAVE_GPR(7, r21) 1293 SAVE_4GPRS(8, r21) 1294 SAVE_8GPRS(12, r21) 1295 SAVE_8GPRS(24, r21) 1296 1297 mflr r23 1298 andi. r24,r23,0x3f00 /* get vector offset */ 1299 stw r24,TRAP(r21) 1300 li r22,0 1301 stw r22,RESULT(r21) 1302 mtspr SPRG2,r22 /* r1 is now kernel sp */ 1303 1304 lwz r24,0(r23) /* virtual address of handler */ 1305 lwz r23,4(r23) /* where to go when done */ 1306 mtspr SRR0,r24 1307 mtspr SRR1,r20 1308 mtlr r23 1309 SYNC 1310 rfi /* jump to handler, enable MMU */ 1311 1312int_return: 1313 mfmsr r28 /* Disable interrupts */ 1314 li r4,0 1315 ori r4,r4,MSR_EE 1316 andc r28,r28,r4 1317 SYNC /* Some chip revs need this... 
*/ 1318 mtmsr r28 1319 SYNC 1320 lwz r2,_CTR(r1) 1321 lwz r0,_LINK(r1) 1322 mtctr r2 1323 mtlr r0 1324 lwz r2,_XER(r1) 1325 lwz r0,_CCR(r1) 1326 mtspr XER,r2 1327 mtcrf 0xFF,r0 1328 REST_10GPRS(3, r1) 1329 REST_10GPRS(13, r1) 1330 REST_8GPRS(23, r1) 1331 REST_GPR(31, r1) 1332 lwz r2,_NIP(r1) /* Restore environment */ 1333 lwz r0,_MSR(r1) 1334 mtspr SRR0,r2 1335 mtspr SRR1,r0 1336 lwz r0,GPR0(r1) 1337 lwz r2,GPR2(r1) 1338 lwz r1,GPR1(r1) 1339 SYNC 1340 rfi 1341 1342crit_return: 1343 mfmsr r28 /* Disable interrupts */ 1344 li r4,0 1345 ori r4,r4,MSR_EE 1346 andc r28,r28,r4 1347 SYNC /* Some chip revs need this... */ 1348 mtmsr r28 1349 SYNC 1350 lwz r2,_CTR(r1) 1351 lwz r0,_LINK(r1) 1352 mtctr r2 1353 mtlr r0 1354 lwz r2,_XER(r1) 1355 lwz r0,_CCR(r1) 1356 mtspr XER,r2 1357 mtcrf 0xFF,r0 1358 REST_10GPRS(3, r1) 1359 REST_10GPRS(13, r1) 1360 REST_8GPRS(23, r1) 1361 REST_GPR(31, r1) 1362 lwz r2,_NIP(r1) /* Restore environment */ 1363 lwz r0,_MSR(r1) 1364 mtspr SPRN_CSRR0,r2 1365 mtspr SPRN_CSRR1,r0 1366 lwz r0,GPR0(r1) 1367 lwz r2,GPR2(r1) 1368 lwz r1,GPR1(r1) 1369 SYNC 1370 rfci 1371 1372mck_return: 1373 mfmsr r28 /* Disable interrupts */ 1374 li r4,0 1375 ori r4,r4,MSR_EE 1376 andc r28,r28,r4 1377 SYNC /* Some chip revs need this... */ 1378 mtmsr r28 1379 SYNC 1380 lwz r2,_CTR(r1) 1381 lwz r0,_LINK(r1) 1382 mtctr r2 1383 mtlr r0 1384 lwz r2,_XER(r1) 1385 lwz r0,_CCR(r1) 1386 mtspr XER,r2 1387 mtcrf 0xFF,r0 1388 REST_10GPRS(3, r1) 1389 REST_10GPRS(13, r1) 1390 REST_8GPRS(23, r1) 1391 REST_GPR(31, r1) 1392 lwz r2,_NIP(r1) /* Restore environment */ 1393 lwz r0,_MSR(r1) 1394 mtspr SPRN_MCSRR0,r2 1395 mtspr SPRN_MCSRR1,r0 1396 lwz r0,GPR0(r1) 1397 lwz r2,GPR2(r1) 1398 lwz r1,GPR1(r1) 1399 SYNC 1400 rfmci 1401 1402/* Cache functions. 
1403*/ 1404.globl flush_icache 1405flush_icache: 1406.globl invalidate_icache 1407invalidate_icache: 1408 mfspr r0,L1CSR1 1409 ori r0,r0,L1CSR1_ICFI 1410 msync 1411 isync 1412 mtspr L1CSR1,r0 1413 isync 1414 blr /* entire I cache */ 1415 1416.globl invalidate_dcache 1417invalidate_dcache: 1418 mfspr r0,L1CSR0 1419 ori r0,r0,L1CSR0_DCFI 1420 msync 1421 isync 1422 mtspr L1CSR0,r0 1423 isync 1424 blr 1425 1426 .globl icache_enable 1427icache_enable: 1428 mflr r8 1429 bl invalidate_icache 1430 mtlr r8 1431 isync 1432 mfspr r4,L1CSR1 1433 ori r4,r4,0x0001 1434 oris r4,r4,0x0001 1435 mtspr L1CSR1,r4 1436 isync 1437 blr 1438 1439 .globl icache_disable 1440icache_disable: 1441 mfspr r0,L1CSR1 1442 lis r3,0 1443 ori r3,r3,L1CSR1_ICE 1444 andc r0,r0,r3 1445 mtspr L1CSR1,r0 1446 isync 1447 blr 1448 1449 .globl icache_status 1450icache_status: 1451 mfspr r3,L1CSR1 1452 andi. r3,r3,L1CSR1_ICE 1453 blr 1454 1455 .globl dcache_enable 1456dcache_enable: 1457 mflr r8 1458 bl invalidate_dcache 1459 mtlr r8 1460 isync 1461 mfspr r0,L1CSR0 1462 ori r0,r0,0x0001 1463 oris r0,r0,0x0001 1464 msync 1465 isync 1466 mtspr L1CSR0,r0 1467 isync 1468 blr 1469 1470 .globl dcache_disable 1471dcache_disable: 1472 mfspr r3,L1CSR0 1473 lis r4,0 1474 ori r4,r4,L1CSR0_DCE 1475 andc r3,r3,r4 1476 mtspr L1CSR0,r3 1477 isync 1478 blr 1479 1480 .globl dcache_status 1481dcache_status: 1482 mfspr r3,L1CSR0 1483 andi. 
r3,r3,L1CSR0_DCE 1484 blr 1485 1486 .globl get_pir 1487get_pir: 1488 mfspr r3,PIR 1489 blr 1490 1491 .globl get_pvr 1492get_pvr: 1493 mfspr r3,PVR 1494 blr 1495 1496 .globl get_svr 1497get_svr: 1498 mfspr r3,SVR 1499 blr 1500 1501 .globl wr_tcr 1502wr_tcr: 1503 mtspr TCR,r3 1504 blr 1505 1506/*------------------------------------------------------------------------------- */ 1507/* Function: in8 */ 1508/* Description: Input 8 bits */ 1509/*------------------------------------------------------------------------------- */ 1510 .globl in8 1511in8: 1512 lbz r3,0x0000(r3) 1513 blr 1514 1515/*------------------------------------------------------------------------------- */ 1516/* Function: out8 */ 1517/* Description: Output 8 bits */ 1518/*------------------------------------------------------------------------------- */ 1519 .globl out8 1520out8: 1521 stb r4,0x0000(r3) 1522 sync 1523 blr 1524 1525/*------------------------------------------------------------------------------- */ 1526/* Function: out16 */ 1527/* Description: Output 16 bits */ 1528/*------------------------------------------------------------------------------- */ 1529 .globl out16 1530out16: 1531 sth r4,0x0000(r3) 1532 sync 1533 blr 1534 1535/*------------------------------------------------------------------------------- */ 1536/* Function: out16r */ 1537/* Description: Byte reverse and output 16 bits */ 1538/*------------------------------------------------------------------------------- */ 1539 .globl out16r 1540out16r: 1541 sthbrx r4,r0,r3 1542 sync 1543 blr 1544 1545/*------------------------------------------------------------------------------- */ 1546/* Function: out32 */ 1547/* Description: Output 32 bits */ 1548/*------------------------------------------------------------------------------- */ 1549 .globl out32 1550out32: 1551 stw r4,0x0000(r3) 1552 sync 1553 blr 1554 1555/*------------------------------------------------------------------------------- */ 1556/* Function: out32r */ 1557/* 
Description: Byte reverse and output 32 bits */ 1558/*------------------------------------------------------------------------------- */ 1559 .globl out32r 1560out32r: 1561 stwbrx r4,r0,r3 1562 sync 1563 blr 1564 1565/*------------------------------------------------------------------------------- */ 1566/* Function: in16 */ 1567/* Description: Input 16 bits */ 1568/*------------------------------------------------------------------------------- */ 1569 .globl in16 1570in16: 1571 lhz r3,0x0000(r3) 1572 blr 1573 1574/*------------------------------------------------------------------------------- */ 1575/* Function: in16r */ 1576/* Description: Input 16 bits and byte reverse */ 1577/*------------------------------------------------------------------------------- */ 1578 .globl in16r 1579in16r: 1580 lhbrx r3,r0,r3 1581 blr 1582 1583/*------------------------------------------------------------------------------- */ 1584/* Function: in32 */ 1585/* Description: Input 32 bits */ 1586/*------------------------------------------------------------------------------- */ 1587 .globl in32 1588in32: 1589 lwz 3,0x0000(3) 1590 blr 1591 1592/*------------------------------------------------------------------------------- */ 1593/* Function: in32r */ 1594/* Description: Input 32 bits and byte reverse */ 1595/*------------------------------------------------------------------------------- */ 1596 .globl in32r 1597in32r: 1598 lwbrx r3,r0,r3 1599 blr 1600#endif /* !CONFIG_NAND_SPL */ 1601 1602/*------------------------------------------------------------------------------*/ 1603 1604/* 1605 * void write_tlb(mas0, mas1, mas2, mas3, mas7) 1606 */ 1607 .globl write_tlb 1608write_tlb: 1609 mtspr MAS0,r3 1610 mtspr MAS1,r4 1611 mtspr MAS2,r5 1612 mtspr MAS3,r6 1613#ifdef CONFIG_ENABLE_36BIT_PHYS 1614 mtspr MAS7,r7 1615#endif 1616 li r3,0 1617#ifdef CONFIG_SYS_BOOK3E_HV 1618 mtspr MAS8,r3 1619#endif 1620 isync 1621 tlbwe 1622 msync 1623 isync 1624 blr 1625 1626/* 1627 * void relocate_code 
(addr_sp, gd, addr_moni) 1628 * 1629 * This "function" does not return, instead it continues in RAM 1630 * after relocating the monitor code. 1631 * 1632 * r3 = dest 1633 * r4 = src 1634 * r5 = length in bytes 1635 * r6 = cachelinesize 1636 */ 1637 .globl relocate_code 1638relocate_code: 1639 mr r1,r3 /* Set new stack pointer */ 1640 mr r9,r4 /* Save copy of Init Data pointer */ 1641 mr r10,r5 /* Save copy of Destination Address */ 1642 1643 GET_GOT 1644 mr r3,r5 /* Destination Address */ 1645 lis r4,CONFIG_SYS_MONITOR_BASE@h /* Source Address */ 1646 ori r4,r4,CONFIG_SYS_MONITOR_BASE@l 1647 lwz r5,GOT(__init_end) 1648 sub r5,r5,r4 1649 li r6,CONFIG_SYS_CACHELINE_SIZE /* Cache Line Size */ 1650 1651 /* 1652 * Fix GOT pointer: 1653 * 1654 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address 1655 * 1656 * Offset: 1657 */ 1658 sub r15,r10,r4 1659 1660 /* First our own GOT */ 1661 add r12,r12,r15 1662 /* the the one used by the C code */ 1663 add r30,r30,r15 1664 1665 /* 1666 * Now relocate code 1667 */ 1668 1669 cmplw cr1,r3,r4 1670 addi r0,r5,3 1671 srwi. r0,r0,2 1672 beq cr1,4f /* In place copy is not necessary */ 1673 beq 7f /* Protect against 0 count */ 1674 mtctr r0 1675 bge cr1,2f 1676 1677 la r8,-4(r4) 1678 la r7,-4(r3) 16791: lwzu r0,4(r8) 1680 stwu r0,4(r7) 1681 bdnz 1b 1682 b 4f 1683 16842: slwi r0,r0,2 1685 add r8,r4,r0 1686 add r7,r3,r0 16873: lwzu r0,-4(r8) 1688 stwu r0,-4(r7) 1689 bdnz 3b 1690 1691/* 1692 * Now flush the cache: note that we must start from a cache aligned 1693 * address. Otherwise we might miss one cache line. 
1694 */ 16954: cmpwi r6,0 1696 add r5,r3,r5 1697 beq 7f /* Always flush prefetch queue in any case */ 1698 subi r0,r6,1 1699 andc r3,r3,r0 1700 mr r4,r3 17015: dcbst 0,r4 1702 add r4,r4,r6 1703 cmplw r4,r5 1704 blt 5b 1705 sync /* Wait for all dcbst to complete on bus */ 1706 mr r4,r3 17076: icbi 0,r4 1708 add r4,r4,r6 1709 cmplw r4,r5 1710 blt 6b 17117: sync /* Wait for all icbi to complete on bus */ 1712 isync 1713 1714/* 1715 * We are done. Do not return, instead branch to second part of board 1716 * initialization, now running from RAM. 1717 */ 1718 1719 addi r0,r10,in_ram - _start + _START_OFFSET 1720 1721 /* 1722 * As IVPR is going to point RAM address, 1723 * Make sure IVOR15 has valid opcode to support debugger 1724 */ 1725 mtspr IVOR15,r0 1726 1727 /* 1728 * Re-point the IVPR at RAM 1729 */ 1730 mtspr IVPR,r10 1731 1732 mtlr r0 1733 blr /* NEVER RETURNS! */ 1734 .globl in_ram 1735in_ram: 1736 1737 /* 1738 * Relocation Function, r12 point to got2+0x8000 1739 * 1740 * Adjust got2 pointers, no need to check for 0, this code 1741 * already puts a few entries in the table. 1742 */ 1743 li r0,__got2_entries@sectoff@l 1744 la r3,GOT(_GOT2_TABLE_) 1745 lwz r11,GOT(_GOT2_TABLE_) 1746 mtctr r0 1747 sub r11,r3,r11 1748 addi r3,r3,-4 17491: lwzu r0,4(r3) 1750 cmpwi r0,0 1751 beq- 2f 1752 add r0,r0,r11 1753 stw r0,0(r3) 17542: bdnz 1b 1755 1756 /* 1757 * Now adjust the fixups and the pointers to the fixups 1758 * in case we need to move ourselves again. 
1759 */ 1760 li r0,__fixup_entries@sectoff@l 1761 lwz r3,GOT(_FIXUP_TABLE_) 1762 cmpwi r0,0 1763 mtctr r0 1764 addi r3,r3,-4 1765 beq 4f 17663: lwzu r4,4(r3) 1767 lwzux r0,r4,r11 1768 cmpwi r0,0 1769 add r0,r0,r11 1770 stw r4,0(r3) 1771 beq- 5f 1772 stw r0,0(r4) 17735: bdnz 3b 17744: 1775clear_bss: 1776 /* 1777 * Now clear BSS segment 1778 */ 1779 lwz r3,GOT(__bss_start) 1780 lwz r4,GOT(__bss_end__) 1781 1782 cmplw 0,r3,r4 1783 beq 6f 1784 1785 li r0,0 17865: 1787 stw r0,0(r3) 1788 addi r3,r3,4 1789 cmplw 0,r3,r4 1790 bne 5b 17916: 1792 1793 mr r3,r9 /* Init Data pointer */ 1794 mr r4,r10 /* Destination Address */ 1795 bl board_init_r 1796 1797#ifndef CONFIG_NAND_SPL 1798 /* 1799 * Copy exception vector code to low memory 1800 * 1801 * r3: dest_addr 1802 * r7: source address, r8: end address, r9: target address 1803 */ 1804 .globl trap_init 1805trap_init: 1806 mflr r4 /* save link register */ 1807 GET_GOT 1808 lwz r7,GOT(_start_of_vectors) 1809 lwz r8,GOT(_end_of_vectors) 1810 1811 li r9,0x100 /* reset vector always at 0x100 */ 1812 1813 cmplw 0,r7,r8 1814 bgelr /* return if r7>=r8 - just in case */ 18151: 1816 lwz r0,0(r7) 1817 stw r0,0(r9) 1818 addi r7,r7,4 1819 addi r9,r9,4 1820 cmplw 0,r7,r8 1821 bne 1b 1822 1823 /* 1824 * relocate `hdlr' and `int_return' entries 1825 */ 1826 li r7,.L_CriticalInput - _start + _START_OFFSET 1827 bl trap_reloc 1828 li r7,.L_MachineCheck - _start + _START_OFFSET 1829 bl trap_reloc 1830 li r7,.L_DataStorage - _start + _START_OFFSET 1831 bl trap_reloc 1832 li r7,.L_InstStorage - _start + _START_OFFSET 1833 bl trap_reloc 1834 li r7,.L_ExtInterrupt - _start + _START_OFFSET 1835 bl trap_reloc 1836 li r7,.L_Alignment - _start + _START_OFFSET 1837 bl trap_reloc 1838 li r7,.L_ProgramCheck - _start + _START_OFFSET 1839 bl trap_reloc 1840 li r7,.L_FPUnavailable - _start + _START_OFFSET 1841 bl trap_reloc 1842 li r7,.L_Decrementer - _start + _START_OFFSET 1843 bl trap_reloc 1844 li r7,.L_IntervalTimer - _start + _START_OFFSET 1845 li 
r8,_end_of_vectors - _start + _START_OFFSET 18462: 1847 bl trap_reloc 1848 addi r7,r7,0x100 /* next exception vector */ 1849 cmplw 0,r7,r8 1850 blt 2b 1851 1852 /* Update IVORs as per relocated vector table address */ 1853 li r7,0x0100 1854 mtspr IVOR0,r7 /* 0: Critical input */ 1855 li r7,0x0200 1856 mtspr IVOR1,r7 /* 1: Machine check */ 1857 li r7,0x0300 1858 mtspr IVOR2,r7 /* 2: Data storage */ 1859 li r7,0x0400 1860 mtspr IVOR3,r7 /* 3: Instruction storage */ 1861 li r7,0x0500 1862 mtspr IVOR4,r7 /* 4: External interrupt */ 1863 li r7,0x0600 1864 mtspr IVOR5,r7 /* 5: Alignment */ 1865 li r7,0x0700 1866 mtspr IVOR6,r7 /* 6: Program check */ 1867 li r7,0x0800 1868 mtspr IVOR7,r7 /* 7: floating point unavailable */ 1869 li r7,0x0900 1870 mtspr IVOR8,r7 /* 8: System call */ 1871 /* 9: Auxiliary processor unavailable(unsupported) */ 1872 li r7,0x0a00 1873 mtspr IVOR10,r7 /* 10: Decrementer */ 1874 li r7,0x0b00 1875 mtspr IVOR11,r7 /* 11: Interval timer */ 1876 li r7,0x0c00 1877 mtspr IVOR12,r7 /* 12: Watchdog timer */ 1878 li r7,0x0d00 1879 mtspr IVOR13,r7 /* 13: Data TLB error */ 1880 li r7,0x0e00 1881 mtspr IVOR14,r7 /* 14: Instruction TLB error */ 1882 li r7,0x0f00 1883 mtspr IVOR15,r7 /* 15: Debug */ 1884 1885 lis r7,0x0 1886 mtspr IVPR,r7 1887 1888 mtlr r4 /* restore link register */ 1889 blr 1890 1891.globl unlock_ram_in_cache 1892unlock_ram_in_cache: 1893 /* invalidate the INIT_RAM section */ 1894 lis r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h 1895 ori r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l 1896 mfspr r4,L1CFG0 1897 andi. 
r4,r4,0x1ff 1898 slwi r4,r4,(10 - 1 - L1_CACHE_SHIFT) 1899 mtctr r4 19001: dcbi r0,r3 1901 addi r3,r3,CONFIG_SYS_CACHELINE_SIZE 1902 bdnz 1b 1903 sync 1904 1905 /* Invalidate the TLB entries for the cache */ 1906 lis r3,CONFIG_SYS_INIT_RAM_ADDR@h 1907 ori r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l 1908 tlbivax 0,r3 1909 addi r3,r3,0x1000 1910 tlbivax 0,r3 1911 addi r3,r3,0x1000 1912 tlbivax 0,r3 1913 addi r3,r3,0x1000 1914 tlbivax 0,r3 1915 isync 1916 blr 1917 1918.globl flush_dcache 1919flush_dcache: 1920 mfspr r3,SPRN_L1CFG0 1921 1922 rlwinm r5,r3,9,3 /* Extract cache block size */ 1923 twlgti r5,1 /* Only 32 and 64 byte cache blocks 1924 * are currently defined. 1925 */ 1926 li r4,32 1927 subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - 1928 * log2(number of ways) 1929 */ 1930 slw r5,r4,r5 /* r5 = cache block size */ 1931 1932 rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ 1933 mulli r7,r7,13 /* An 8-way cache will require 13 1934 * loads per set. 1935 */ 1936 slw r7,r7,r6 1937 1938 /* save off HID0 and set DCFA */ 1939 mfspr r8,SPRN_HID0 1940 ori r9,r8,HID0_DCFA@l 1941 mtspr SPRN_HID0,r9 1942 isync 1943 1944 lis r4,0 1945 mtctr r7 1946 19471: lwz r3,0(r4) /* Load... */ 1948 add r4,r4,r5 1949 bdnz 1b 1950 1951 msync 1952 lis r4,0 1953 mtctr r7 1954 19551: dcbf 0,r4 /* ...and flush. */ 1956 add r4,r4,r5 1957 bdnz 1b 1958 1959 /* restore HID0 */ 1960 mtspr SPRN_HID0,r8 1961 isync 1962 1963 blr 1964 1965.globl setup_ivors 1966setup_ivors: 1967 1968#include "fixed_ivor.S" 1969 blr 1970#endif /* !CONFIG_NAND_SPL */ 1971