/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor was
 * completed before continuing.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans and invalidates the entire L1 D cache.
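 *
 * The constants below encode a set/way walk of the 32 KiB D cache:
 * bits [31:30] of \rd select the way and bits [12:5] select the set,
 * so the adds of 0x40000000 steps through the four ways (the carry out
 * ends the inner loop) and the subs of 0x20 moves on to the next of
 * the 256 sets.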
 */

	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm

	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xsc3_proc_init)
	ret	lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	ret	r0
ENDPROC(cpu_xsc3_reset)
	.popsection

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
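 *
 * Idle mode is entered below by writing 1 to the CP14 power mode
 * register; the core wakes again when an interrupt is asserted.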
 */
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	ret	lr

/* ================================= CACHE ================================ */

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xsc3_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(xsc3_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_flags of the VMA describing the address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

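/*
 * Ranges of MAX_AREA_SIZE bytes or more take the __flush_whole_cache
 * path above: cleaning such a range line by line would touch at least
 * as many lines as the whole D cache holds, so a whole-cache clean is
 * no more work.
 */
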
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the I cache and the D cache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(xsc3_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
xsc3_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

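/*
 * xsc3_dma_inv_range cleans a partially covered line at an unaligned
 * start or end before its invalidate loop, so data sharing such a line
 * with the buffer is written back rather than discarded.
 */
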
/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
xsc3_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xsc3_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xsc3_dma_clean_range
	bcs	xsc3_dma_inv_range
	b	xsc3_dma_flush_range
ENDPROC(xsc3_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xsc3_dma_unmap_area)
	ret	lr
ENDPROC(xsc3_dma_unmap_area)

	.globl	xsc3_flush_kern_cache_louis
	.equ	xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xsc3

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
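 *
 * The L1 caches are virtually addressed, so the old mm's dirty lines
 * are cleaned and the I cache and BTB invalidated before the new
 * translation table (marked outer cacheable so table walks can hit L2)
 * is loaded and the TLBs are invalidated.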
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

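/*
 * The table above is indexed by the Linux PTE's memory type field
 * (L_PTE_MT_MASK) and yields the TEX/C/B bits that cpu_xsc3_set_pte_ext
 * merges into the hardware PTE; shared pages additionally get the
 * PTE_EXT_COHERENT bit.
 */
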
	.align	5
ENTRY(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	ret	lr

	.ltorg
	.align

.globl	cpu_xsc3_suspend_size
.equ	cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_xsc3_do_suspend)

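/*
 * Reload the registers saved by cpu_xsc3_do_suspend, point the MMU at
 * the page tables whose address arrives in r1, and pass the saved
 * control register value to cpu_resume_mmu in r0.
 */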
ENTRY(cpu_xsc3_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_xsc3_do_resume)
#endif

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #1 << 6			@ cp6 access for early sched_clock
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}

#ifdef CONFIG_CACHE_XSC3L2
	mrc	p15, 1, r0, c0, c0, 1		@ get L2 present information
	ands	r0, r0, #0xf8
	orrne	r6, r6, #(1 << 26)		@ enable L2 if present
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
						@ ...I Z..S .... .... (uc)
	ret	lr

	.size	__xsc3_setup, . - __xsc3_setup
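
/*
 * xsc3_crval below supplies the control register masks used by
 * __xsc3_setup: the crval macro (proc-macros.S) emits the 'clear' mask
 * followed by 'mmuset' (or 'ucset' on !MMU builds), which are loaded
 * into r5/r6 and applied with the bic/orr pair above.
 */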
	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_xsc3_name, "XScale-V3 based processor"

	.align

	.section ".proc.info.init", "a"

.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__xsc3_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	xsc3_proc_info xsc3, 0x69056000, 0xffffe000

/* Note: PXA935 changed its implementor ID from Intel to Marvell */
	xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000