/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

/*
 *	v6_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176. It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
	mov	r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
	/*
	 * Erratum 411920 workaround: with interrupts masked, issue the
	 * invalidate four times, then pad with NOPs so the invalidation
	 * completes before any further instruction fetch.
	 */
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
#else
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
#endif
	ret	lr
ENDPROC(v6_flush_icache_all)

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache: clean+invalidate the D-cache and invalidate
 *	the I-cache/BTB (or the unified cache on non-Harvard builds).
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all		@ tail-call erratum-safe I-cache flush
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	ret	lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.  On a VIPT
 *	cache this needs no per-address-space work, so it falls through to
 *	v6_flush_user_cache_range, which is a no-op.
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	ret	lr

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all		@ tail-call erratum-safe I-cache flush
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	ret	lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	ret	lr


/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	/* Clean (not invalidate) partial lines at each unaligned edge so
	 * adjacent data sharing the cache line is not lost. */
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrbne	r2, [r1, #-1]			@ read for ownership
	strbne	r2, [r1, #-1]			@ write for ownership
#endif
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlo	r2, [r0]			@ read for ownership
	strlo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrblo	r2, [r0]			@ read for ownership
	strblo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0			@ r1 = end address
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range
#endif
	ret	lr
ENDPROC(v6_dma_unmap_area)

	/* No LoUIS-specific flush on v6: alias to the full flush. */
	.globl	v6_flush_kern_cache_louis
	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6