/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-arm926.S: MMU functions for ARM926EJ-S
 *
 * Copyright (C) 1999-2001 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm926.
 *
 * CONFIG_CPU_ARM926_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32
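
/*
 * Note: with 32-byte lines, CACHE_DLIMIT corresponds to 16384 / 32 = 512
 * single-line cache operations.  Ranges larger than this are handed to
 * __flush_whole_cache below, on the assumption that a whole-cache
 * clean/invalidate is then the cheaper alternative.
 */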

	.text
/*
 * cpu_arm926_proc_init()
 */
ENTRY(cpu_arm926_proc_init)
	ret	lr

/*
 * cpu_arm926_proc_fin()
 */
ENTRY(cpu_arm926_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm926_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm926_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm926_reset)
	.popsection

/*
 * cpu_arm926_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	10
ENTRY(cpu_arm926_do_idle)
	mov	r0, #0
	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	bic	r2, r1, #1 << 12
	mrs	r3, cpsr			@ Disable FIQs while Icache
	orr	ip, r3, #PSR_F_BIT		@ is disabled
	msr	cpsr_c, ip
	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	msr	cpsr_c, r3			@ Restore FIQ state
	ret	lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm926_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm926_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
ENTRY(arm926_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm926_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
1:	mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test,clean,invalidate
	bne	1b
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
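
/*
 * Note on the loop in __flush_whole_cache above: mrc p15, 0, <flags>,
 * c7, c14, 3 is the ARM926 "test, clean and invalidate DCache"
 * operation.  It is intended to be executed in a loop; the bne repeats
 * it until the returned flags indicate the operation has completed,
 * i.e. the whole D-cache has been cleaned and invalidated.
 */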

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags describing address space
 */
ENTRY(arm926_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
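
/*
 * Note: the loop above is unrolled to handle two cache lines per
 * iteration, and the I-cache entries are only invalidated (mcrne) when
 * the vm_flags passed in r2 include VM_EXEC.
 */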

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm926_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm926_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm926_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm926_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm926_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm926_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm926_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm926_dma_clean_range
	bcs	arm926_dma_inv_range
	b	arm926_dma_flush_range
ENDPROC(arm926_dma_map_area)
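
/*
 * Note: the dispatch above follows the dma_data_direction values
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * DMA_TO_DEVICE only needs a clean, DMA_FROM_DEVICE takes the bcs path
 * and is invalidated, and DMA_BIDIRECTIONAL falls through to a full
 * clean+invalidate.
 */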

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm926_dma_unmap_area)
	ret	lr
ENDPROC(arm926_dma_unmap_area)

	.globl	arm926_flush_kern_cache_louis
	.equ	arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm926

ENTRY(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm926_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm926_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
1:	mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test,clean,invalidate
	bne	1b
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
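
/*
 * Note: the ARM926EJ-S has virtually indexed, virtually tagged caches
 * and its TLB entries carry no address-space tag, so switch_mm above
 * flushes the whole D-cache, invalidates the I-cache and drains the
 * write buffer before loading the new translation table base, and then
 * invalidates the TLBs.
 */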

/*
 * cpu_arm926_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm926_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl	cpu_arm926_suspend_size
.equ	cpu_arm926_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_arm926_do_suspend)

ENTRY(cpu_arm926_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_arm926_do_resume)
#endif

	.type	__arm926_setup, #function
__arm926_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

	adr	r5, arm926_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	ret	lr
	.size	__arm926_setup, . - __arm926_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 0001 ..11 0101
	 *
	 */
	.type	arm926_crval, #object
arm926_crval:
	crval	clear=0x00007f3f, mmuset=0x00003135, ucset=0x00001134
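
/*
 * Note: crval (see proc-macros.S) emits two words: a mask of control
 * register bits to clear, followed by the bits to set (mmuset when
 * CONFIG_MMU is enabled, ucset otherwise).  __arm926_setup above loads
 * the pair and applies it with bic/orr to produce the value the caller
 * writes into the control register.
 */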

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5tej"
	string	cpu_elf_name, "v5"
	string	cpu_arm926_name, "ARM926EJ-S"

	.align

	.section ".proc.info.init", "a"

	.type	__arm926_proc_info,#object
__arm926_proc_info:
	.long	0x41069260			@ ARM926EJ-S (v5TEJ)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm926_setup, __arm926_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_arm926_name
	.long	arm926_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm926_cache_fns
	.size	__arm926_proc_info, . - __arm926_proc_info