/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/proc-sa1100.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 *  MMU functions for SA110
 *
 *  These are the low level assembler for performing cache and TLB
 *  functions on the StrongARM-1100 and StrongARM-1110.
 *
 *  Note that SA1100 and SA1110 share everything but their name and CPU ID.
 *
 *  12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
 *  Flush the read buffer at context switches
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>

#include "proc-macros.S"

/*
 * the cache line size of the I and D cache
 */
#define DCACHELINESIZE	32

	.section .text

/*
 * cpu_sa1100_proc_init()
 */
ENTRY(cpu_sa1100_proc_init)
	mov	r0, #0
	mcr	p15, 0, r0, c15, c1, 2		@ Enable clock switching
	mcr	p15, 0, r0, c9, c0, 5		@ Allow read-buffer operations from userland
	ret	lr

/*
 * cpu_sa1100_proc_fin()
 *
 * Prepare the CPU for reset:
 *  - Disable interrupts
 *  - Clean and turn off caches.
 */
ENTRY(cpu_sa1100_proc_fin)
	mcr	p15, 0, ip, c15, c2, 2		@ Disable clock switching
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_sa1100_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_sa1100_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_sa1100_reset)
	.popsection

/*
 * cpu_sa1100_do_idle(type)
 *
 * Cause the processor to idle
 *
 * type: call type:
 *	0 = slow idle
 *	1 = fast idle
 *	2 = switch to slow processor clock
 *	3 = switch to fast processor clock
 */
	.align	5
ENTRY(cpu_sa1100_do_idle)
	mov	r0, r0			@ 4 nop padding
	mov	r0, r0
	mov	r0, r0
	mov	r0, r0			@ 4 nop padding
	mov	r0, r0
	mov	r0, r0
	mov	r0, #0
	ldr	r1, =UNCACHEABLE_ADDR	@ ptr to uncacheable address
					@ --- aligned to a cache line
	mcr	p15, 0, r0, c15, c2, 2	@ disable clock switching
	ldr	r1, [r1, #0]		@ force switch to MCLK
	mcr	p15, 0, r0, c15, c8, 2	@ wait for interrupt
	mov	r0, r0			@ safety
	mcr	p15, 0, r0, c15, c1, 2	@ enable clock switching
	ret	lr

/* ================================= CACHE ================================ */

/*
 * cpu_sa1100_dcache_clean_area(addr,sz)
 *
 * Clean the specified entry of any caches such that the MMU
 * translation fetches will obtain correct data.
 *
 * addr: cache-unaligned virtual address
 */
	.align	5
ENTRY(cpu_sa1100_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #DCACHELINESIZE
	subs	r1, r1, #DCACHELINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_sa1100_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_sa1100_switch_mm)
#ifdef CONFIG_MMU
	str	lr, [sp, #-4]!
	bl	v4wb_flush_kern_cache_all	@ clears IP
	mcr	p15, 0, ip, c9, c0, 0		@ invalidate RB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ldr	pc, [sp], #4
#else
	ret	lr
#endif

/*
 * cpu_sa1100_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_sa1100_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

.globl	cpu_sa1100_suspend_size
.equ	cpu_sa1100_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c3, c0, 0		@ domain ID
	mrc	p15, 0, r5, c13, c0, 0		@ PID
	mrc	p15, 0, r6, c1, c0, 0		@ control reg
	stmia	r0, {r4 - r6}			@ store cp regs
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_sa1100_do_suspend)

ENTRY(cpu_sa1100_do_resume)
	ldmia	r0, {r4 - r6}			@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0		@ flush I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0		@ flush I&D cache
	mcr	p15, 0, ip, c9, c0, 0		@ invalidate RB
	mcr	p15, 0, ip, c9, c0, 5		@ allow user space to use RB

	mcr	p15, 0, r4, c3, c0, 0		@ domain ID
	mcr	p15, 0, r1, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r5, c13, c0, 0		@ PID
	mov	r0, r6				@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_sa1100_do_resume)
#endif

	.type	__sa1100_setup, #function
__sa1100_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, sa1100_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__sa1100_setup, . - __sa1100_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 1101
	 *
	 */
	.type	sa1100_crval, #object
sa1100_crval:
	crval	clear=0x00003f3f, mmuset=0x0000313d, ucset=0x00001130

	__INITDATA

/*
 * SA1100 and SA1110 share the same function calls
 */

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv4"
	string	cpu_elf_name, "v4"
	string	cpu_sa1100_name, "StrongARM-1100"
	string	cpu_sa1110_name, "StrongARM-1110"

	.align

	.section ".proc.info.init", "a"

.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__sa1100_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
	.long	\cpu_name
	.long	sa1100_processor_functions
	.long	v4wb_tlb_fns
	.long	v4_mc_user_fns
	.long	v4wb_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name
	sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name