/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the ARM1026EJ-S.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768
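
/*
 * With the values above, the data cache being maintained works out to
 * 16 segments x 64 lines x 32 bytes = 32 KiB, which matches the
 * 32768-byte CACHE_DLIMIT/MAX_AREA_SIZE thresholds: ranges at least
 * that large fall back to whole-cache maintenance instead of
 * per-line operations.
 */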

	.text
/*
 * cpu_arm1026_proc_init()
 */
ENTRY(cpu_arm1026_proc_init)
	ret	lr

/*
 * cpu_arm1026_proc_fin()
 */
ENTRY(cpu_arm1026_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm1026_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm1026_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm1026_reset)
	.popsection

/*
 * cpu_arm1026_do_idle()
 */
	.align	5
ENTRY(cpu_arm1026_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/* ================================= CACHE ================================ */

	.align	5

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm1026_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
#endif
	ret	lr
ENDPROC(arm1026_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(arm1026_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm1026_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
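	/*
	 * The MRC from c7, c14, 3 below is the ARM10 "test, clean and
	 * invalidate" D-cache operation: each pass cleans and
	 * invalidates dirty lines and returns its status in the flags
	 * (read into APSR_nzcv), so the loop repeats until the Z flag
	 * reports that no dirty entries remain.
	 */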
#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test, clean, invalidate
	bne	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags for this space
 */
ENTRY(arm1026_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1026_coherent_kern_range)
	/* FALLTHROUGH */
/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1026_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
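
/*
 * In cpu_cache_fns, coherent_user_range returns an int so that cores
 * which access user addresses with faulting accessors can report
 * -EFAULT; this implementation cannot fault, so it simply returns 0
 * (success) in r0 above.
 */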

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm1026_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm1026_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm1026_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1026_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
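/*
 * The dispatch below assumes the usual dma_data_direction encoding
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * clean for DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE, and
 * clean+invalidate for DMA_BIDIRECTIONAL.
 */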
ENTRY(arm1026_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1026_dma_clean_range
	bcs	arm1026_dma_inv_range
	b	arm1026_dma_flush_range
ENDPROC(arm1026_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm1026_dma_unmap_area)
	ret	lr
ENDPROC(arm1026_dma_unmap_area)

	.globl	arm1026_flush_kern_cache_louis
	.equ	arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm1026

	.align	5
ENTRY(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1026_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU
	mov	r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test, clean, invalidate
	bne	1b
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr

/*
 * cpu_arm1026_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	ret	lr

	.type	__arm1026_setup, #function
__arm1026_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
	mcr	p15, 0, r4, c2, c0		@ load page table pointer
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ explicitly disable writeback
	mcr	p15, 7, r0, c15, c0, 0
#endif
	adr	r5, arm1026_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R.. .... .... ....
#endif
	ret	lr
	.size	__arm1026_setup, . - __arm1026_setup
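
/*
 * arm1026_crval below expands (via the crval macro in proc-macros.S)
 * to the clear/set mask words loaded by __arm1026_setup: 'clear' bits
 * are removed from the control register and 'mmuset' bits are then
 * ORed in ('ucset' replaces 'mmuset' on !CONFIG_MMU builds); the
 * result is returned in r0 for the caller to write back when enabling
 * the MMU and caches.
 */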

	/*
	 * R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 *
	 */
	.type	arm1026_crval, #object
arm1026_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort

	.section .rodata

	string	cpu_arch_name, "armv5tej"
	string	cpu_elf_name, "v5"
	.align
	string	cpu_arm1026_name, "ARM1026EJ-S"
	.align

	.section ".proc.info.init", "a"

	.type	__arm1026_proc_info,#object
__arm1026_proc_info:
	.long	0x4106a260			@ ARM 1026EJ-S (v5TEJ)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm1026_setup, __arm1026_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_arm1026_name
	.long	arm1026_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1026_cache_fns
	.size	__arm1026_proc_info, . - __arm1026_proc_info