/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 *  This is the low-level assembler code for performing the cache and
 *  TLB functions on the ARM1022E.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1022_proc_init()
 */
ENTRY(cpu_arm1022_proc_init)
	ret	lr

/*
 * cpu_arm1022_proc_fin()
 */
ENTRY(cpu_arm1022_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm1022_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
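 *
 * The code is placed in the identity-mapped .idmap.text section so
 * that it can continue executing after the MMU and caches are turned
 * off below.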
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm1022_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm1022_reset)
	.popsection

/*
 * cpu_arm1022_do_idle()
 */
	.align	5
ENTRY(cpu_arm1022_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/* ================================= CACHE ================================ */

	.align	5

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
#endif
	ret	lr
ENDPROC(arm1022_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1022_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
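 *
 * The D-cache is cleaned and invalidated by index: bits 31..26 of r3
 * step through the 64 entries in a segment, and bits 8..5 step
 * through the 16 segments, which is the operand layout the loop below
 * feeds to the clean+invalidate-by-index operation (c7, c14, 2).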
 */
ENTRY(arm1022_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for this space
 */
ENTRY(arm1022_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1022_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
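 *
 * Each cache line in the range is cleaned from the D-cache and then
 * invalidated from the I-cache, so that newly written instructions
 * become visible to instruction fetch.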
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1022_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm1022_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
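 *
 * Cleaning writes dirty lines back to memory but leaves them valid
 * in the cache; contrast dma_inv_range above, which discards the
 * lines without writing them back.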
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1022_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1022_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1022_dma_clean_range
	bcs	arm1022_dma_inv_range
	b	arm1022_dma_flush_range
ENDPROC(arm1022_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1022_dma_unmap_area)
	ret	lr
ENDPROC(arm1022_dma_unmap_area)

	.globl	arm1022_flush_kern_cache_louis
	.equ	arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm1022

	.align	5
ENTRY(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	ret	lr

/* =============================== PageTable ============================== */

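/*
 * The caches on this core are virtually indexed and virtually tagged,
 * so the whole D-cache must be cleaned and the I-cache invalidated
 * before the translation table base is changed; there is no cheaper
 * fast path here.
 */
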
/*
 * cpu_arm1022_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr

/*
 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	ret	lr

	.type	__arm1022_setup, #function
__arm1022_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm1022_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R..............
#endif
	ret	lr
	.size	__arm1022_setup, . - __arm1022_setup
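
/*
 * The crval macro (see proc-macros.S) emits the mask of control
 * register bits to clear followed by the bits to set; mmuset is used
 * for MMU kernels and ucset for !CONFIG_MMU builds.
 */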

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 */
	.type	arm1022_crval, #object
arm1022_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_arm1022_name, "ARM1022"

	.align

	.section ".proc.info.init", "a"

	.type	__arm1022_proc_info,#object
__arm1022_proc_info:
	.long	0x4105a220			@ ARM 1022E (v5TE)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm1022_setup, __arm1022_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
	.long	cpu_arm1022_name
	.long	arm1022_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1022_cache_fns
	.size	__arm1022_proc_info, . - __arm1022_proc_info