/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 * Heavily based on proc-arm926.S
 * Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32
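
/*
 * Worked example: with CACHE_DLINESIZE == 32, cleaning CACHE_DLIMIT
 * bytes line by line costs 16384 / 32 = 512 per-line MCR operations,
 * roughly the point past which one pass of the whole-cache set/way
 * loop (__flush_whole_cache below) is expected to be cheaper.
 */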

	.bss
	.align	3
__cache_params_loc:
	.space	8

	.text
__cache_params:
	.word	__cache_params_loc

/*
 * cpu_feroceon_proc_init()
 */
ENTRY(cpu_feroceon_proc_init)
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)
	stmia	r1, {r2, r3}
	ret	lr
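
/*
 * cpu_feroceon_proc_init() leaves two words at __cache_params_loc for
 * __flush_whole_cache to consume: the highest D cache set offset,
 * pre-shifted by 5 (log2 of the 32-byte line size), and the highest
 * way index shifted into bits [31:30]. Worked example, assuming a
 * 16 KiB 4-way cache: 16384 / 4 / 32 = 128 sets per way, so the words
 * stored are (128 - 1) << 5 and (4 - 1) << 30.
 */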

/*
 * cpu_feroceon_proc_fin()
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(feroceon_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
	.align	5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

__flush_whole_cache:
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}
1:	orr	ip, r1, r3
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
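
/*
 * In the loop above, ip is a composite set/way descriptor for the
 * "clean + invalidate D set/way" operation (c7, c14, 2): the inner
 * loop counts the way index down from bits [31:30], the outer loop
 * counts the set offset down in steps of 1 << 5, and each loop exits
 * once its field underflows past zero (carry clear).
 */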

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags describing address space
 */
	.align	5
ENTRY(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
	.align	5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
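
/*
 * Note: feroceon_coherent_user_range() returns 0 in r0; in the ARM
 * cpu_cache_fns convention a zero return means success, and only
 * cores that can detect a fault on the user address return non-zero.
 * This follows proc-arm926.S, on which this file is based.
 */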

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
	.align	5
ENTRY(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr
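
/*
 * The feroceon_range_* variants use the Feroceon-specific "range"
 * cache operations (p15, opc1=5, c15): the first MCR latches the
 * start virtual address, the second supplies the inclusive top
 * address and operates on the whole span. Interrupts are masked
 * around each pair on the assumption that an interrupt handler
 * issuing its own range operation between the two MCRs could
 * otherwise clobber the latched start address.
 */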

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
	.align	5
ENTRY(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
ENTRY(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(feroceon_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range
	bcs	feroceon_dma_inv_range
	b	feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)
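
/*
 * The dispatch above relies on the enum dma_data_direction ordering
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * beq takes DMA_TO_DEVICE to the clean-only path, bcs takes the
 * remaining higher value (DMA_FROM_DEVICE) to invalidate, and the
 * fall-through branch flushes for DMA_BIDIRECTIONAL.
 */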

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(feroceon_range_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range
	bcs	feroceon_range_dma_inv_range
	b	feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(feroceon_dma_unmap_area)
	ret	lr
ENDPROC(feroceon_dma_unmap_area)

	.globl	feroceon_flush_kern_cache_louis
	.equ	feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_kern_cache_louis
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range
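
/*
 * The range_alias macro gives each unchanged function a
 * feroceon_range_* name via .equ, so the second
 * define_cache_functions invocation can assemble a complete struct
 * cpu_cache_fns for the range-operation variant from a mix of shared
 * and range-specific entry points.
 */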

	.align	5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r2
#else
	ret	lr
#endif
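
/*
 * Whichever way the VM_EXEC test resolves for the lr value saved in
 * r2, the I cache invalidate and write buffer drain execute exactly
 * once on this path: inside __flush_whole_cache when the bit happens
 * to be set in lr, or via the mcreq pair above when it is not.
 */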

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl	cpu_feroceon_suspend_size
.equ	cpu_feroceon_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)

ENTRY(cpu_feroceon_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif

	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__feroceon_setup, . - __feroceon_setup

	/*
	 *      B
	 *  R   P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"

.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__feroceon_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns
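
/*
 * Note the cache operations table selected per CPU ID above: the
 * 88FR571 and 88FR131 entries use feroceon_range_cache_fns and thus
 * the range cache operations, while the 88FR531 (and the old-ID
 * match) stay on the per-line feroceon_cache_fns.
 */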